diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 81181301895e..6dabe613c827 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -57,6 +57,8 @@ BWC_VERSION: - "7.17.6" - "7.17.7" - "7.17.8" + - "7.17.9" + - "7.17.10" - "8.0.0" - "8.0.1" - "8.1.0" @@ -78,5 +80,8 @@ BWC_VERSION: - "8.5.0" - "8.5.1" - "8.5.2" + - "8.5.3" - "8.6.0" + - "8.6.1" + - "8.6.2" - "8.7.0" diff --git a/.ci/jobs.t/elastic+elasticsearch+dra-snapshot.yml b/.ci/jobs.t/elastic+elasticsearch+dra-snapshot.yml new file mode 100644 index 000000000000..37fdd85ee656 --- /dev/null +++ b/.ci/jobs.t/elastic+elasticsearch+dra-snapshot.yml @@ -0,0 +1,67 @@ +--- +- job: + name: elastic+elasticsearch+%BRANCH%+dra-snapshot + workspace: /dev/shm/elastic+elasticsearch+%BRANCH%+dra-snapshot + display-name: "elastic / elasticsearch # %BRANCH% - DRA snapshot" + description: "Publishing Daily Releasable Artifacts (DRAs) of Elasticsearch %BRANCH% snapshots.\n" + node: "ubuntu-20.04" + builders: + - inject: + properties-file: '.ci/java-versions.properties' + properties-content: | + JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA + RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA + - shell: | + #!/usr/local/bin/runbld --redirect-stderr + WORKFLOW="snapshot" + RM_BRANCH="%BRANCH%" && [[ "%BRANCH%" == "main" ]] && RM_BRANCH=master + ES_VERSION=$(cat build-tools-internal/version.properties \ + | grep elasticsearch \ + | sed "s/elasticsearch *= *//g") + VERSION_SUFFIX="" && [[ "$WORKFLOW" == "snapshot" ]] && VERSION_SUFFIX="-SNAPSHOT" + BEATS_BUILD_ID="$(./.ci/scripts/resolve-dra-manifest.sh beats $RM_BRANCH $ES_VERSION $WORKFLOW)" + ML_CPP_BUILD_ID="$(./.ci/scripts/resolve-dra-manifest.sh ml-cpp $RM_BRANCH $ES_VERSION $WORKFLOW)" + set -euo pipefail + set +x + VAULT_TOKEN=$(vault write -field=token auth/approle/login role_id=$VAULT_ROLE_ID secret_id=$VAULT_SECRET_ID) + export VAULT_TOKEN + $WORKSPACE/.ci/scripts/run-gradle.sh -Ddra.artifacts=true \ + -Ddra.artifacts.dependency.beats=${BEATS_BUILD_ID} \ + -Ddra.artifacts.dependency.ml-cpp=${ML_CPP_BUILD_ID} \ + -Ddra.workflow=$WORKFLOW \ + -Dcsv=$WORKSPACE/build/distributions/dependencies-${ES_VERSION}${VERSION_SUFFIX}.csv \ + buildReleaseArtifacts \ + exportCompressedDockerImages \ + :distribution:generateDependenciesReport + + unset VAULT_TOKEN + set -x + $WORKSPACE/x-pack/plugin/sql/connectors/tableau/package.sh asm qualifier="$VERSION_SUFFIX" + + # we regenerate this file as part of the release manager invocation + rm $WORKSPACE/build/distributions/elasticsearch-jdbc-${ES_VERSION}${VERSION_SUFFIX}.taco.sha512 + + # Allow other users access to read the artifacts so they are readable in the + # container + find $WORKSPACE -type f -path "*/build/distributions/*" -exec chmod a+r {} \; + + # Allow other users write access to create checksum files + find $WORKSPACE -type d -path "*/build/distributions" -exec chmod a+w {} \; + + # Artifacts should be generated + docker run --rm \ + --name release-manager \ + -e VAULT_ADDR \ + -e VAULT_ROLE_ID \ + -e VAULT_SECRET_ID \ + --mount type=bind,readonly=false,src="$PWD",target=/artifacts \ + docker.elastic.co/infra/release-manager:latest \ + cli collect \ + --project elasticsearch \ + --branch "$RM_BRANCH" \ + --commit "$GIT_COMMIT" \ + --workflow "$WORKFLOW" \ + --version "$ES_VERSION" \ + --artifact-set main \ + --dependency beats:https://artifacts-${WORKFLOW}.elastic.co/beats/${BEATS_BUILD_ID}/manifest-${ES_VERSION}${VERSION_SUFFIX}.json \ + --dependency 
ml-cpp:https://artifacts-${WORKFLOW}.elastic.co/ml-cpp/${ML_CPP_BUILD_ID}/manifest-${ES_VERSION}${VERSION_SUFFIX}.json diff --git a/.ci/jobs.t/elastic+elasticsearch+dra-staging-trigger.yml b/.ci/jobs.t/elastic+elasticsearch+dra-staging-trigger.yml new file mode 100644 index 000000000000..87df5e07c2e4 --- /dev/null +++ b/.ci/jobs.t/elastic+elasticsearch+dra-staging-trigger.yml @@ -0,0 +1,6 @@ +--- +jjbb-template: periodic-trigger-lgc.yml +vars: + - periodic-job: elastic+elasticsearch+%BRANCH%+dra-staging + - lgc-job: elastic+elasticsearch+%BRANCH%+intake + - cron: "H H/12 * * *" diff --git a/.ci/jobs.t/elastic+elasticsearch+dra-staging.yml b/.ci/jobs.t/elastic+elasticsearch+dra-staging.yml index d558872abeb9..40a759e20ba2 100644 --- a/.ci/jobs.t/elastic+elasticsearch+dra-staging.yml +++ b/.ci/jobs.t/elastic+elasticsearch+dra-staging.yml @@ -1,6 +1,6 @@ --- - job: - name: elastic+elasticsearch+%BRANCH%+dra-snapshot + name: elastic+elasticsearch+%BRANCH%+dra-staging workspace: /dev/shm/elastic+elasticsearch+%BRANCH%+dra-staging display-name: "elastic / elasticsearch # %BRANCH% - DRA staging" description: "Publishing Daily Releasable Artifacts (DRAs) of Elasticsearch %BRANCH% staging.\n" @@ -13,6 +13,12 @@ RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA - shell: | #!/usr/local/bin/runbld --redirect-stderr + + # Don't publish main branch to staging + if [ "%BRANCH%" == "main" ]; then + exit 0 + fi + WORKFLOW="staging" RM_BRANCH="%BRANCH%" && [[ "%BRANCH%" == "main" ]] && RM_BRANCH=master ES_VERSION=$(cat build-tools-internal/version.properties \ diff --git a/.ci/jobs.t/elastic+elasticsearch+intake.yml b/.ci/jobs.t/elastic+elasticsearch+intake.yml index f4aa32ec7e75..138318897b52 100644 --- a/.ci/jobs.t/elastic+elasticsearch+intake.yml +++ b/.ci/jobs.t/elastic+elasticsearch+intake.yml @@ -54,6 +54,13 @@ kill-phase-on: NEVER current-parameters: true git-revision: true + - multijob: + name: Publish snapshot artifacts + projects: + - name: elastic+elasticsearch+%BRANCH%+dra-snapshot + kill-phase-on: NEVER + current-parameters: true + git-revision: true - multijob: name: Update last good commit projects: diff --git a/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-arm.yml b/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-arm.yml index a5e590bc4a76..773db8ff2f92 100644 --- a/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-arm.yml +++ b/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-arm.yml @@ -3,8 +3,7 @@ name: elastic+elasticsearch+%BRANCH%+multijob+platform-support-arm display-name: "elastic / elasticsearch # %BRANCH% - arm compatibility" description: "Elasticsearch %BRANCH% ARM (aarch64) compatibility testing.\n" - # Don't use ramdisk for now as we are exhausting memory and causing oomkiller to trigger - # child-workspace: "/dev/shm/elastic+elasticsearch+%BRANCH%+multijob+platform-support-arm" + child-workspace: "/dev/shm/elastic+elasticsearch+%BRANCH%+multijob+platform-support-arm" project-type: matrix node: master scm: @@ -17,6 +16,15 @@ values: - "almalinux-8-aarch64&&immutable" - "ubuntu-1804-aarch64&&immutable" + - axis: + type: user-defined + name: GRADLE_TASK + values: + - 'checkPart1' + - 'checkPart2' + - 'checkPart3' + - 'bwcTestSnapshots' + - 'checkRestCompat' builders: - inject: properties-file: '.ci/java-versions-aarch64.properties' @@ -28,4 +36,4 @@ JAVA16_HOME=$HOME/.java/jdk16 - shell: | #!/usr/local/bin/runbld --redirect-stderr - $WORKSPACE/.ci/scripts/run-gradle.sh -Dbwc.checkout.align=true check + $WORKSPACE/.ci/scripts/run-gradle.sh 
-Dbwc.checkout.align=true $GRADLE_TASK diff --git a/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-unix.yml b/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-unix.yml index 6aec8b04a2f3..23f647f5af88 100644 --- a/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-unix.yml +++ b/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-unix.yml @@ -5,6 +5,7 @@ description: "Elasticsearch %BRANCH% unix compatibility testing.\n" project-type: matrix node: master + child-workspace: "/var/lib/jenkins/workspace/elastic+elasticsearch+%BRANCH%+multijob+platform-support-unix" scm: - git: wipe-workspace: false diff --git a/.ci/jobs.t/elastic+elasticsearch+periodic+dra-snapshots-trigger.yml b/.ci/jobs.t/elastic+elasticsearch+periodic+dra-snapshots-trigger.yml deleted file mode 100644 index 7f1c639770ae..000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+periodic+dra-snapshots-trigger.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -jjbb-template: periodic-trigger-lgc.yml -vars: - - periodic-job: elastic+elasticsearch+%BRANCH%+periodic+dra-snapshot - - lgc-job: elastic+elasticsearch+%BRANCH%+intake - - cron: "H H/12 * * *" diff --git a/.ci/jobs.t/elastic+elasticsearch+periodic+dra-snapshots.yml b/.ci/jobs.t/elastic+elasticsearch+periodic+dra-snapshots.yml deleted file mode 100644 index e0c7e7d122d3..000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+periodic+dra-snapshots.yml +++ /dev/null @@ -1,67 +0,0 @@ ---- -- job: - name: elastic+elasticsearch+%BRANCH%+periodic+dra-snapshot - workspace: /dev/shm/elastic+elasticsearch+%BRANCH%+periodic+dra-snapshot - display-name: "elastic / elasticsearch # %BRANCH% - DRA snapshot" - description: "Publishing Daily Releasable Artifacts (DRAs) of Elasticsearch %BRANCH% snapshots.\n" - node: "ubuntu-20.04" - builders: - - inject: - properties-file: '.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA - - shell: | - #!/usr/local/bin/runbld --redirect-stderr - WORKFLOW="snapshot" - RM_BRANCH="%BRANCH%" && [[ "%BRANCH%" == "main" ]] && RM_BRANCH=master - ES_VERSION=$(cat build-tools-internal/version.properties \ - | grep elasticsearch \ - | sed "s/elasticsearch *= *//g") - VERSION_SUFFIX="" && [[ "$WORKFLOW" == "snapshot" ]] && VERSION_SUFFIX="-SNAPSHOT" - BEATS_BUILD_ID="$(./.ci/scripts/resolve-dra-manifest.sh beats $RM_BRANCH $ES_VERSION $WORKFLOW)" - ML_CPP_BUILD_ID="$(./.ci/scripts/resolve-dra-manifest.sh ml-cpp $RM_BRANCH $ES_VERSION $WORKFLOW)" - set -euo pipefail - set +x - VAULT_TOKEN=$(vault write -field=token auth/approle/login role_id=$VAULT_ROLE_ID secret_id=$VAULT_SECRET_ID) - export VAULT_TOKEN - $WORKSPACE/.ci/scripts/run-gradle.sh -Ddra.artifacts=true \ - -Ddra.artifacts.dependency.beats=${BEATS_BUILD_ID} \ - -Ddra.artifacts.dependency.ml-cpp=${ML_CPP_BUILD_ID} \ - -Ddra.workflow=$WORKFLOW \ - -Dcsv=$WORKSPACE/build/distributions/dependencies-${ES_VERSION}${VERSION_SUFFIX}.csv \ - buildReleaseArtifacts \ - exportCompressedDockerImages \ - :distribution:generateDependenciesReport - - unset VAULT_TOKEN - set -x - $WORKSPACE/x-pack/plugin/sql/connectors/tableau/package.sh asm qualifier="$VERSION_SUFFIX" - - # we regenerate this file as part of the release manager invocation - rm $WORKSPACE/build/distributions/elasticsearch-jdbc-${ES_VERSION}${VERSION_SUFFIX}.taco.sha512 - - # Allow other users access to read the artifacts so they are readable in the - # container - find $WORKSPACE -type f -path "*/build/distributions/*" -exec chmod a+r {} \; - 
- # Allow other users write access to create checksum files - find $WORKSPACE -type d -path "*/build/distributions" -exec chmod a+w {} \; - - # Artifacts should be generated - docker run --rm \ - --name release-manager \ - -e VAULT_ADDR \ - -e VAULT_ROLE_ID \ - -e VAULT_SECRET_ID \ - --mount type=bind,readonly=false,src="$PWD",target=/artifacts \ - docker.elastic.co/infra/release-manager:latest \ - cli collect \ - --project elasticsearch \ - --branch "$RM_BRANCH" \ - --commit "$GIT_COMMIT" \ - --workflow "$WORKFLOW" \ - --version "$ES_VERSION" \ - --artifact-set main \ - --dependency beats:https://artifacts-${WORKFLOW}.elastic.co/beats/${BEATS_BUILD_ID}/manifest-${ES_VERSION}${VERSION_SUFFIX}.json \ - --dependency ml-cpp:https://artifacts-${WORKFLOW}.elastic.co/ml-cpp/${ML_CPP_BUILD_ID}/manifest-${ES_VERSION}${VERSION_SUFFIX}.json diff --git a/.ci/jobs.t/elastic+elasticsearch+periodic+ear.yml b/.ci/jobs.t/elastic+elasticsearch+periodic+ear.yml index 47a8d4f48cc5..de4886c9f8eb 100644 --- a/.ci/jobs.t/elastic+elasticsearch+periodic+ear.yml +++ b/.ci/jobs.t/elastic+elasticsearch+periodic+ear.yml @@ -16,7 +16,7 @@ #!/bin/bash # Configure a dm-crypt volume backed by a file set -e - dd if=/dev/zero of=dm-crypt.img bs=1 count=0 seek=60GB + dd if=/dev/zero of=dm-crypt.img bs=1 count=0 seek=80GB dd if=/dev/urandom of=key.secret bs=2k count=1 LOOP=$(losetup -f) sudo losetup $LOOP dm-crypt.img diff --git a/.ci/jobs.t/elastic+elasticsearch+periodic+java-preview-features-trigger.yml b/.ci/jobs.t/elastic+elasticsearch+periodic+java-preview-features-trigger.yml deleted file mode 100644 index 9b1893362c05..000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+periodic+java-preview-features-trigger.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -jjbb-template: periodic-trigger-lgc.yml -vars: - - periodic-job: elastic+elasticsearch+%BRANCH%+periodic+java-preview-features - - lgc-job: elastic+elasticsearch+%BRANCH%+intake - - cron: "H H/12 * * *" diff --git a/.ci/jobs.t/elastic+elasticsearch+periodic+java-preview-features.yml b/.ci/jobs.t/elastic+elasticsearch+periodic+java-preview-features.yml deleted file mode 100644 index 592bb2c3941c..000000000000 --- a/.ci/jobs.t/elastic+elasticsearch+periodic+java-preview-features.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -- job: - name: "elastic+elasticsearch+%BRANCH%+periodic+java-preview-features" - display-name: "elastic / elasticsearch # %BRANCH% - java preview features" - description: "Testing of the Elasticsearch %BRANCH% branch with java preview features enabled.\n" - project-type: matrix - child-workspace: /dev/shm/elastic+elasticsearch+%BRANCH%+periodic+java-preview-features - node: master - scm: - - git: - wipe-workspace: false - axes: - - axis: - type: slave - name: nodes - values: - - "general-purpose" - # We shred out these jobs to avoid running out of memory given since we use a ramdisk workspace - - axis: - type: user-defined - name: GRADLE_TASK - values: - - 'checkPart1' - - 'checkPart2' - - 'checkPart3' - - 'bwcTestSnapshots' - - 'checkRestCompat' - builders: - - inject: - properties-file: '.ci/java-versions.properties' - properties-content: | - JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$HOME/.java/openjdk19 - JAVA11_HOME=$HOME/.java/java11 - JAVA16_HOME=$HOME/.java/openjdk16 - - shell: | - #!/usr/local/bin/runbld --redirect-stderr - $WORKSPACE/.ci/scripts/run-gradle.sh -Dtests.jvm.argline="--enable-preview" -Dbwc.checkout.align=true $GRADLE_TASK diff --git a/.ci/jobs.t/elastic+elasticsearch+periodic+release-tests.yml 
b/.ci/jobs.t/elastic+elasticsearch+periodic+release-tests.yml index 0ce470d37b34..7447deb49dcf 100644 --- a/.ci/jobs.t/elastic+elasticsearch+periodic+release-tests.yml +++ b/.ci/jobs.t/elastic+elasticsearch+periodic+release-tests.yml @@ -23,7 +23,9 @@ mkdir -p ${BEATS_DIR} curl --fail -o "${BEATS_DIR}/metricbeat-${ES_VERSION}-linux-x86_64.tar.gz" https://artifacts-snapshot.elastic.co/beats/${ES_VERSION}-SNAPSHOT/downloads/beats/metricbeat/metricbeat-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz - curl --fail -o "${BEATS_DIR}/filebeat-${ES_VERSION}-linux-x86_64.tar.gz" https://artifacts-snapshot.elastic.co/beats/${ES_VERSION}-SNAPSHOT/downloads/beats/filebeat/filebeat-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz + curl --fail -o "${BEATS_DIR}/metricbeat-${ES_VERSION}-linux-arm64.tar.gz" https://artifacts-snapshot.elastic.co/beats/${ES_VERSION}-SNAPSHOT/downloads/beats/metricbeat/metricbeat-${ES_VERSION}-SNAPSHOT-linux-arm64.tar.gz + curl --fail -o "${BEATS_DIR}/filebeat-${ES_VERSION}-linux-x86_64.tar.gz" https://artifacts-snapshot.elastic.co/beats/${ES_VERSION}-SNAPSHOT/downloads/beats/metricbeat/metricbeat-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz + curl --fail -o "${BEATS_DIR}/filebeat-${ES_VERSION}-linux-arm64.tar.gz" https://artifacts-snapshot.elastic.co/beats/${ES_VERSION}-SNAPSHOT/downloads/beats/filebeat/filebeat-${ES_VERSION}-SNAPSHOT-linux-arm64.tar.gz # Fetch ML artifacts export ML_IVY_REPO=$(mktemp -d) diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots-windows.yml new file mode 100644 index 000000000000..9cd24f1391e0 --- /dev/null +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots-windows.yml @@ -0,0 +1,55 @@ +--- +- job: + name: "elastic+elasticsearch+pull-request+bwc-snapshots-windows" + display-name: "elastic / elasticsearch - pull request bwc windows" + description: "Testing of Elasticsearch pull requests - bwc windows" + project-type: matrix + node: master + child-workspace: "C:\\Users\\jenkins\\workspace\\bwc-snapshots\\${BUILD_NUMBER}" + scm: + - git: + refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" + branches: + - "${ghprbActualCommit}" + triggers: + - github-pull-request: + org-list: + - elastic + allow-whitelist-orgs-as-admins: true + trigger-phrase: '.*run\W+elasticsearch-ci/bwc-snapshots-windows.*' + github-hooks: true + status-context: elasticsearch-ci/bwc-snapshots-windows + cancel-builds-on-update: true + black-list-target-branches: + - 6.8 + excluded-regions: + - ^docs/.* + white-list-labels: + - 'test-windows' + black-list-labels: + - '>test-mute' + axes: + - axis: + type: slave + name: nodes + values: + - "windows-immutable" + - axis: + type: yaml + filename: ".ci/snapshotBwcVersions" + name: "BWC_VERSION" + builders: + - inject: + properties-file: '.ci/java-versions.properties' + properties-content: | + JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA + RUNTIME_JAVA_HOME=$USERPROFILE\\.java\\$ES_RUNTIME_JAVA + JAVA11_HOME=$USERPROFILE\\.java\\java11 + JAVA16_HOME=$USERPROFILE\\.java\\openjdk16 + - batch: | + del /f /s /q %USERPROFILE%\.gradle\init.d\*.* + mkdir %USERPROFILE%\.gradle\init.d + copy .ci\init.gradle %USERPROFILE%\.gradle\init.d\ + ( + echo call %GRADLEW_BAT% --max-workers=4 -Dbwc.checkout.align=true v%BWC_VERSION%#bwcTest ^|^| exit /b 1 + ) | java -jar "C:\Program Files\infra\bin\runbld" --redirect-stderr - diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml 
b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml index dbb54e37fb23..f4ded4d1eeca 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml @@ -33,6 +33,7 @@ properties-content: | JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA RUNTIME_JAVA_HOME=$USERPROFILE\\.java\\$ES_RUNTIME_JAVA + JAVA11_HOME=$USERPROFILE\\.java\\java11 JAVA16_HOME=$USERPROFILE\\.java\\openjdk16 GRADLE_TASK=checkPart1 - batch: | diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml index ab367188cb8e..53295df00627 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml @@ -33,6 +33,7 @@ properties-content: | JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA RUNTIME_JAVA_HOME=$USERPROFILE\\.java\\$ES_RUNTIME_JAVA + JAVA11_HOME=$USERPROFILE\\.java\\java11 JAVA16_HOME=$USERPROFILE\\.java\\openjdk16 GRADLE_TASK=checkPart2 - batch: | diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-windows.yml index a287c8c0bd9c..80e4bf7f47c0 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-windows.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-windows.yml @@ -34,6 +34,7 @@ properties-content: | JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA RUNTIME_JAVA_HOME=$USERPROFILE\\.java\\$ES_RUNTIME_JAVA + JAVA11_HOME=$USERPROFILE\\.java\\java11 JAVA16_HOME=$USERPROFILE\\.java\\openjdk16 GRADLE_TASK=checkPart3 - batch: | diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml index 54242eee118b..125b0a00b6c8 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml @@ -45,13 +45,16 @@ mkdir -p ${BEATS_DIR} curl --fail -o "${BEATS_DIR}/metricbeat-${ES_VERSION}-linux-x86_64.tar.gz" https://artifacts-snapshot.elastic.co/beats/${ES_VERSION}-SNAPSHOT/downloads/beats/metricbeat/metricbeat-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz - curl --fail -o "${BEATS_DIR}/filebeat-${ES_VERSION}-linux-x86_64.tar.gz" https://artifacts-snapshot.elastic.co/beats/${ES_VERSION}-SNAPSHOT/downloads/beats/filebeat/filebeat-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz + curl --fail -o "${BEATS_DIR}/metricbeat-${ES_VERSION}-linux-arm64.tar.gz" https://artifacts-snapshot.elastic.co/beats/${ES_VERSION}-SNAPSHOT/downloads/beats/metricbeat/metricbeat-${ES_VERSION}-SNAPSHOT-linux-arm64.tar.gz + curl --fail -o "${BEATS_DIR}/filebeat-${ES_VERSION}-linux-x86_64.tar.gz" https://artifacts-snapshot.elastic.co/beats/${ES_VERSION}-SNAPSHOT/downloads/beats/metricbeat/metricbeat-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz + curl --fail -o "${BEATS_DIR}/filebeat-${ES_VERSION}-linux-arm64.tar.gz" https://artifacts-snapshot.elastic.co/beats/${ES_VERSION}-SNAPSHOT/downloads/beats/filebeat/filebeat-${ES_VERSION}-SNAPSHOT-linux-arm64.tar.gz # Fetch ML artifacts export ML_IVY_REPO=$(mktemp -d) mkdir -p ${ML_IVY_REPO}/maven/org/elasticsearch/ml/ml-cpp/${ES_VERSION} curl --fail -o "${ML_IVY_REPO}/maven/org/elasticsearch/ml/ml-cpp/${ES_VERSION}/ml-cpp-${ES_VERSION}-deps.zip" https://artifacts-snapshot.elastic.co/ml-cpp/${ES_VERSION}-SNAPSHOT/downloads/ml-cpp/ml-cpp-${ES_VERSION}-SNAPSHOT-deps.zip curl --fail -o 
"${ML_IVY_REPO}/maven/org/elasticsearch/ml/ml-cpp/${ES_VERSION}/ml-cpp-${ES_VERSION}-nodeps.zip" https://artifacts-snapshot.elastic.co/ml-cpp/${ES_VERSION}-SNAPSHOT/downloads/ml-cpp/ml-cpp-${ES_VERSION}-SNAPSHOT-nodeps.zip + curl --fail -o "${ML_IVY_REPO}/maven/org/elasticsearch/ml/ml-cpp/${ES_VERSION}/ml-cpp-${ES_VERSION}.zip" https://artifacts-snapshot.elastic.co/ml-cpp/${ES_VERSION}-SNAPSHOT/downloads/ml-cpp/ml-cpp-${ES_VERSION}-SNAPSHOT.zip $WORKSPACE/.ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dbuild.snapshot=false -Dbuild.ml_cpp.repo=file://${ML_IVY_REPO} \ -Dtests.jvm.argline=-Dbuild.snapshot=false -Dlicense.key=${WORKSPACE}/x-pack/license-tools/src/test/resources/public.key -Dbuild.id=deadbeef build diff --git a/.ci/matrix-runtime-javas.yml b/.ci/matrix-runtime-javas.yml index a6b2d4a15d84..07582c4892d5 100644 --- a/.ci/matrix-runtime-javas.yml +++ b/.ci/matrix-runtime-javas.yml @@ -10,3 +10,4 @@ ES_RUNTIME_JAVA: - openjdk17 - openjdk18 - openjdk19 + - openjdk20 diff --git a/.ci/packer_cache.sh b/.ci/packer_cache.sh index 5a7b3c792d7f..43d1c5a82b90 100755 --- a/.ci/packer_cache.sh +++ b/.ci/packer_cache.sh @@ -43,4 +43,4 @@ fi ## Gradle is able to resolve dependencies resolved with earlier gradle versions ## therefore we run main _AFTER_ we run 6.8 which uses an earlier gradle version export JAVA_HOME="${HOME}"/.java/${ES_BUILD_JAVA} -./gradlew --parallel clean -s resolveAllDependencies -Dorg.gradle.warning.mode=none +./gradlew --parallel clean -s resolveAllDependencies -Dorg.gradle.warning.mode=none -Drecurse.bwc=true diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index dd700ed35e2c..c89f884223c0 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,4 @@ BWC_VERSION: - - "7.17.8" - - "8.5.2" - - "8.6.0" + - "7.17.10" + - "8.6.2" - "8.7.0" diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 7a95e4eaef1b..6a3d07df917e 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -16,6 +16,6 @@ x-pack/plugin/core/src/main/resources/monitoring-logstash.json @elastic/infra-mo x-pack/plugin/core/src/main/resources/monitoring-mb-ilm-policy.json @elastic/infra-monitoring-ui x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java @elastic/infra-monitoring-ui -# Elastic Agent -x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet @elastic/elastic-agent-control-plane -x-pack/plugin/core/src/main/resources/fleet-* @elastic/elastic-agent-control-plane +# Fleet +x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet @elastic/fleet +x-pack/plugin/core/src/main/resources/fleet-* @elastic/fleet diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml index 0bae5de1d0f8..b434572cf6f3 100644 --- a/.github/ISSUE_TEMPLATE/bug.yml +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -4,22 +4,27 @@ labels: [">bug", "needs:triage"] body: - type: markdown attributes: - value: | + value: > Github is reserved for bug reports and feature requests; it is not the place for general questions. If you have a question or an unconfirmed bug, please visit the [forums](https://discuss.elastic.co/c/elasticsearch). - Please also check your OS is [supported](https://www.elastic.co/support/matrix#show_os). - If it is not, the issue is likely to be closed. + Please also check your OS is [supported](https://www.elastic.co/support/matrix#show_os), + and that the version of Elasticsearch has not passed [end-of-life](https://www.elastic.co/support/eol). 
+ If you are using an unsupported OS or an unsupported version then the issue is likely to be closed. + For security vulnerabilities please only send reports to security@elastic.co. See https://www.elastic.co/community/security for more information. + Please fill in the following details to help us reproduce the bug: - type: input id: es_version attributes: label: Elasticsearch Version - description: The version of Elasticsearch you are running, found with `bin/elasticsearch --version` + description: >- + The version of Elasticsearch you are running, found with `bin/elasticsearch --version`. + Ensure this version has not [passed end-of-life](https://www.elastic.co/support/eol). validations: required: true - type: input diff --git a/BUILDING.md b/BUILDING.md index fa2258608144..692a7a559de2 100644 --- a/BUILDING.md +++ b/BUILDING.md @@ -78,7 +78,7 @@ For updated or newly added dependencies you need to add an entry to this verific ``` -In case of updating a dependency, ensure to remove the unused entry of the outdated dependency manually from the verifcation.xml file. +In case of updating a dependency, ensure to remove the unused entry of the outdated dependency manually from the `verification-metadata.xml` file. You can also automate the generation of this entry by running your build using the `--write-verification-metadata` commandline option: ``` diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 61c31c7665d7..00af53743fd3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -870,6 +870,17 @@ benefits of this kind of change are very small, and in our experience it is not worth investing the substantial effort needed to review them. This especially includes changes suggested by tools. +We normally immediately reject PRs which target platforms or system +configurations that are not in the [official support +matrix](https://www.elastic.co/support/matrix). We choose to support particular +platforms with care because we must work to ensure that every Elasticsearch +release works completely on every platform, and we must spend time +investigating test failures and performance regressions there too. We cannot +determine whether PRs which target unsupported platforms or configurations meet +our quality standards, nor can we guarantee that the change they introduce will +continue to work in future releases. We do not want Elasticsearch to suddenly +stop working on a particular platform after an upgrade. + We sometimes reject contributions due to the low quality of the submission since low-quality submissions tend to take unreasonable effort to review properly. 
Quality is rather subjective so it is hard to describe exactly how to diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java index 2a8bf1b91ce7..c69c357b1378 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java @@ -131,7 +131,7 @@ public void setUp() throws Exception { ); } Metadata metadata = mb.build(); - RoutingTable.Builder rb = RoutingTable.builder(); + RoutingTable.Builder rb = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY); for (int i = 1; i <= numIndices; i++) { rb.addAsNew(metadata.index("test_" + i)); } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java index 09080029ba25..f933957c7486 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java @@ -81,7 +81,8 @@ public static AllocationService createAllocationService(Settings settings, Clust NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(settings), EmptyClusterInfoService.INSTANCE, - EmptySnapshotsInfoService.INSTANCE + EmptySnapshotsInfoService.INSTANCE, + TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY ); } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java index 20e8106d93c0..deceab114f4f 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java @@ -29,6 +29,7 @@ import org.elasticsearch.health.HealthIndicatorResult; import org.elasticsearch.health.node.HealthInfo; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.threadpool.ThreadPool; import org.openjdk.jmh.annotations.Benchmark; @@ -45,6 +46,7 @@ import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -127,7 +129,7 @@ public void setUp() throws Exception { .numberOfReplicas(numReplicas) .build(); - final IndexRoutingTable.Builder indexRountingTableBuilder = new IndexRoutingTable.Builder(indexMetadata.getIndex()); + final IndexRoutingTable.Builder indexRountingTableBuilder = IndexRoutingTable.builder(indexMetadata.getIndex()); for (int shardIdNumber = 0; shardIdNumber < numShards; shardIdNumber++) { ShardId shardId = new ShardId(indexMetadata.getIndex(), shardIdNumber); final IndexShardRoutingTable.Builder shardBuilder = new IndexShardRoutingTable.Builder(shardId); @@ -135,7 +137,8 @@ public void setUp() throws Exception { shardId, true, RecoverySource.ExistingStoreRecoverySource.INSTANCE, - decidersNoUnassignedInfo + decidersNoUnassignedInfo, + ShardRouting.Role.DEFAULT ); shardBuilder.addShard(shardRouting); if (shardIdNumber < numReplicas) { 
@@ -144,7 +147,8 @@ public void setUp() throws Exception { shardId, false, RecoverySource.EmptyStoreRecoverySource.INSTANCE, - decidersNoUnassignedInfo + decidersNoUnassignedInfo, + ShardRouting.Role.DEFAULT ) ); } @@ -171,7 +175,7 @@ public void setUp() throws Exception { new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()) ); clusterService.getClusterApplierService().setInitialState(initialClusterState); - indicatorService = new ShardsAvailabilityHealthIndicatorService(clusterService, allocationService); + indicatorService = new ShardsAvailabilityHealthIndicatorService(clusterService, allocationService, new SystemIndices(List.of())); } private int toInt(String v) { diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/TestShardRoutingRoleStrategies.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/TestShardRoutingRoleStrategies.java new file mode 100644 index 000000000000..2c6207285595 --- /dev/null +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/TestShardRoutingRoleStrategies.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.benchmark.routing.allocation; + +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingRoleStrategy; + +public class TestShardRoutingRoleStrategies { + + /** + * A strategy which only returns the default role in all situations. This is deliberately not available to production code to avoid any + * possibility of using it instead of the strategy provided by the plugin (if so configured). 
+ */ + public static final ShardRoutingRoleStrategy DEFAULT_ROLE_ONLY = new ShardRoutingRoleStrategy() { + @Override + public ShardRouting.Role newReplicaRole() { + return ShardRouting.Role.DEFAULT; + } + + @Override + public ShardRouting.Role newEmptyRole(int copyIndex) { + return ShardRouting.Role.DEFAULT; + } + }; + + private TestShardRoutingRoleStrategies() { + // no instances + } +} diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java index 47f1b4f41353..7e40acff82c8 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/script/ScriptScoreBenchmark.java @@ -40,7 +40,7 @@ import org.elasticsearch.script.ScoreScript; import org.elasticsearch.script.ScriptModule; import org.elasticsearch.search.lookup.SearchLookup; -import org.elasticsearch.search.lookup.SourceLookup; +import org.elasticsearch.search.lookup.SourceProvider; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -90,7 +90,7 @@ public class ScriptScoreBenchmark { private final SearchLookup lookup = new SearchLookup( fieldTypes::get, (mft, lookup, fdo) -> mft.fielddataBuilder(FieldDataContext.noRuntimeFields("benchmark")).build(fieldDataCache, breakerService), - new SourceLookup.ReaderSourceProvider() + SourceProvider.fromStoredFields() ); @Param({ "expression", "metal", "painless_cast", "painless_def" }) diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/vector/DistanceFunctionBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/vector/DistanceFunctionBenchmark.java new file mode 100644 index 000000000000..1ee1d6217598 --- /dev/null +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/vector/DistanceFunctionBenchmark.java @@ -0,0 +1,467 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.benchmark.vector; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; +import org.elasticsearch.script.field.vectors.BinaryDenseVector; +import org.elasticsearch.script.field.vectors.ByteBinaryDenseVector; +import org.elasticsearch.script.field.vectors.ByteKnnDenseVector; +import org.elasticsearch.script.field.vectors.KnnDenseVector; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OperationsPerInvocation; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; + +/** + * Various benchmarks for the distance functions + * used by indexed and non-indexed vectors. 
+ * Parameters include element, dims, function, and type. + * For individual local tests it may be useful to increase + * fork, measurement, and operations per invocation. (Note + * to also update the benchmark loop if operations per invocation + * is increased.) + */ +@Fork(1) +@Warmup(iterations = 1) +@Measurement(iterations = 2) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.NANOSECONDS) +@OperationsPerInvocation(25000) +@State(Scope.Benchmark) +public class DistanceFunctionBenchmark { + + @Param({ "float", "byte" }) + private String element; + + @Param({ "96" }) + private int dims; + + @Param({ "dot", "cosine", "l1", "l2" }) + private String function; + + @Param({ "knn", "binary" }) + private String type; + + private abstract static class BenchmarkFunction { + + final int dims; + + private BenchmarkFunction(int dims) { + this.dims = dims; + } + + abstract void execute(Consumer consumer); + } + + private abstract static class KnnFloatBenchmarkFunction extends BenchmarkFunction { + + final float[] docVector; + final float[] queryVector; + + private KnnFloatBenchmarkFunction(int dims, boolean normalize) { + super(dims); + + docVector = new float[dims]; + queryVector = new float[dims]; + + float docMagnitude = 0f; + float queryMagnitude = 0f; + + for (int i = 0; i < dims; ++i) { + docVector[i] = (float) (dims - i); + queryVector[i] = (float) i; + + docMagnitude += (float) (dims - i); + queryMagnitude += (float) i; + } + + docMagnitude /= dims; + queryMagnitude /= dims; + + if (normalize) { + for (int i = 0; i < dims; ++i) { + docVector[i] /= docMagnitude; + queryVector[i] /= queryMagnitude; + } + } + } + } + + private abstract static class BinaryFloatBenchmarkFunction extends BenchmarkFunction { + + final BytesRef docVector; + final float[] queryVector; + + private BinaryFloatBenchmarkFunction(int dims, boolean normalize) { + super(dims); + + float[] docVector = new float[dims]; + queryVector = new float[dims]; + + float docMagnitude = 0f; + float queryMagnitude = 0f; + + for (int i = 0; i < dims; ++i) { + docVector[i] = (float) (dims - i); + queryVector[i] = (float) i; + + docMagnitude += (float) (dims - i); + queryMagnitude += (float) i; + } + + docMagnitude /= dims; + queryMagnitude /= dims; + + ByteBuffer byteBuffer = ByteBuffer.allocate(dims * 4 + 4); + + for (int i = 0; i < dims; ++i) { + if (normalize) { + docVector[i] /= docMagnitude; + queryVector[i] /= queryMagnitude; + } + + byteBuffer.putFloat(docVector[i]); + } + + byteBuffer.putFloat(docMagnitude); + this.docVector = new BytesRef(byteBuffer.array()); + } + } + + private abstract static class KnnByteBenchmarkFunction extends BenchmarkFunction { + + final byte[] docVector; + final byte[] queryVector; + + final float queryMagnitude; + + private KnnByteBenchmarkFunction(int dims) { + super(dims); + + ByteBuffer docVector = ByteBuffer.allocate(dims); + queryVector = new byte[dims]; + + float queryMagnitude = 0f; + + for (int i = 0; i < dims; ++i) { + docVector.put((byte) (dims - i)); + queryVector[i] = (byte) i; + + queryMagnitude += (float) i; + } + + this.docVector = docVector.array(); + this.queryMagnitude = queryMagnitude / dims; + } + } + + private abstract static class BinaryByteBenchmarkFunction extends BenchmarkFunction { + + final BytesRef docVector; + final byte[] queryVector; + + final float queryMagnitude; + + private BinaryByteBenchmarkFunction(int dims) { + super(dims); + + ByteBuffer docVector = ByteBuffer.allocate(dims + 4); + queryVector = new byte[dims]; + + float docMagnitude = 0f; + float 
queryMagnitude = 0f; + + for (int i = 0; i < dims; ++i) { + docVector.put((byte) (dims - i)); + queryVector[i] = (byte) i; + + docMagnitude += (float) (dims - i); + queryMagnitude += (float) i; + } + + docVector.putFloat(docMagnitude / dims); + this.docVector = new BytesRef(docVector.array()); + this.queryMagnitude = queryMagnitude / dims; + + } + } + + private static class DotKnnFloatBenchmarkFunction extends KnnFloatBenchmarkFunction { + + private DotKnnFloatBenchmarkFunction(int dims) { + super(dims, false); + } + + @Override + public void execute(Consumer consumer) { + new KnnDenseVector(docVector).dotProduct(queryVector); + } + } + + private static class DotKnnByteBenchmarkFunction extends KnnByteBenchmarkFunction { + + private DotKnnByteBenchmarkFunction(int dims) { + super(dims); + } + + @Override + public void execute(Consumer consumer) { + new ByteKnnDenseVector(docVector).dotProduct(queryVector); + } + } + + private static class DotBinaryFloatBenchmarkFunction extends BinaryFloatBenchmarkFunction { + + private DotBinaryFloatBenchmarkFunction(int dims) { + super(dims, false); + } + + @Override + public void execute(Consumer consumer) { + new BinaryDenseVector(docVector, dims, Version.CURRENT).dotProduct(queryVector); + } + } + + private static class DotBinaryByteBenchmarkFunction extends BinaryByteBenchmarkFunction { + + private DotBinaryByteBenchmarkFunction(int dims) { + super(dims); + } + + @Override + public void execute(Consumer consumer) { + new ByteBinaryDenseVector(docVector, dims).dotProduct(queryVector); + } + } + + private static class CosineKnnFloatBenchmarkFunction extends KnnFloatBenchmarkFunction { + + private CosineKnnFloatBenchmarkFunction(int dims) { + super(dims, true); + } + + @Override + public void execute(Consumer consumer) { + new KnnDenseVector(docVector).cosineSimilarity(queryVector, false); + } + } + + private static class CosineKnnByteBenchmarkFunction extends KnnByteBenchmarkFunction { + + private CosineKnnByteBenchmarkFunction(int dims) { + super(dims); + } + + @Override + public void execute(Consumer consumer) { + new ByteKnnDenseVector(docVector).cosineSimilarity(queryVector, queryMagnitude); + } + } + + private static class CosineBinaryFloatBenchmarkFunction extends BinaryFloatBenchmarkFunction { + + private CosineBinaryFloatBenchmarkFunction(int dims) { + super(dims, true); + } + + @Override + public void execute(Consumer consumer) { + new BinaryDenseVector(docVector, dims, Version.CURRENT).cosineSimilarity(queryVector, false); + } + } + + private static class CosineBinaryByteBenchmarkFunction extends BinaryByteBenchmarkFunction { + + private CosineBinaryByteBenchmarkFunction(int dims) { + super(dims); + } + + @Override + public void execute(Consumer consumer) { + new ByteBinaryDenseVector(docVector, dims).cosineSimilarity(queryVector, queryMagnitude); + } + } + + private static class L1KnnFloatBenchmarkFunction extends KnnFloatBenchmarkFunction { + + private L1KnnFloatBenchmarkFunction(int dims) { + super(dims, false); + } + + @Override + public void execute(Consumer consumer) { + new KnnDenseVector(docVector).l1Norm(queryVector); + } + } + + private static class L1KnnByteBenchmarkFunction extends KnnByteBenchmarkFunction { + + private L1KnnByteBenchmarkFunction(int dims) { + super(dims); + } + + @Override + public void execute(Consumer consumer) { + new ByteKnnDenseVector(docVector).l1Norm(queryVector); + } + } + + private static class L1BinaryFloatBenchmarkFunction extends BinaryFloatBenchmarkFunction { + + private 
L1BinaryFloatBenchmarkFunction(int dims) { + super(dims, true); + } + + @Override + public void execute(Consumer consumer) { + new BinaryDenseVector(docVector, dims, Version.CURRENT).l1Norm(queryVector); + } + } + + private static class L1BinaryByteBenchmarkFunction extends BinaryByteBenchmarkFunction { + + private L1BinaryByteBenchmarkFunction(int dims) { + super(dims); + } + + @Override + public void execute(Consumer consumer) { + new ByteBinaryDenseVector(docVector, dims).l1Norm(queryVector); + } + } + + private static class L2KnnFloatBenchmarkFunction extends KnnFloatBenchmarkFunction { + + private L2KnnFloatBenchmarkFunction(int dims) { + super(dims, false); + } + + @Override + public void execute(Consumer consumer) { + new KnnDenseVector(docVector).l2Norm(queryVector); + } + } + + private static class L2KnnByteBenchmarkFunction extends KnnByteBenchmarkFunction { + + private L2KnnByteBenchmarkFunction(int dims) { + super(dims); + } + + @Override + public void execute(Consumer consumer) { + new ByteKnnDenseVector(docVector).l2Norm(queryVector); + } + } + + private static class L2BinaryFloatBenchmarkFunction extends BinaryFloatBenchmarkFunction { + + private L2BinaryFloatBenchmarkFunction(int dims) { + super(dims, true); + } + + @Override + public void execute(Consumer consumer) { + new BinaryDenseVector(docVector, dims, Version.CURRENT).l1Norm(queryVector); + } + } + + private static class L2BinaryByteBenchmarkFunction extends BinaryByteBenchmarkFunction { + + private L2BinaryByteBenchmarkFunction(int dims) { + super(dims); + } + + @Override + public void execute(Consumer consumer) { + consumer.accept(new ByteBinaryDenseVector(docVector, dims).l2Norm(queryVector)); + } + } + + private BenchmarkFunction benchmarkFunction; + + @Setup + public void setBenchmarkFunction() { + switch (element) { + case "float" -> { + switch (function) { + case "dot" -> benchmarkFunction = switch (type) { + case "knn" -> new DotKnnFloatBenchmarkFunction(dims); + case "binary" -> new DotBinaryFloatBenchmarkFunction(dims); + default -> throw new UnsupportedOperationException("unexpected type [" + type + "]"); + }; + case "cosine" -> benchmarkFunction = switch (type) { + case "knn" -> new CosineKnnFloatBenchmarkFunction(dims); + case "binary" -> new CosineBinaryFloatBenchmarkFunction(dims); + default -> throw new UnsupportedOperationException("unexpected type [" + type + "]"); + }; + case "l1" -> benchmarkFunction = switch (type) { + case "knn" -> new L1KnnFloatBenchmarkFunction(dims); + case "binary" -> new L1BinaryFloatBenchmarkFunction(dims); + default -> throw new UnsupportedOperationException("unexpected type [" + type + "]"); + }; + case "l2" -> benchmarkFunction = switch (type) { + case "knn" -> new L2KnnFloatBenchmarkFunction(dims); + case "binary" -> new L2BinaryFloatBenchmarkFunction(dims); + default -> throw new UnsupportedOperationException("unexpected type [" + type + "]"); + }; + default -> throw new UnsupportedOperationException("unexpected function [" + function + "]"); + } + } + case "byte" -> { + switch (function) { + case "dot" -> benchmarkFunction = switch (type) { + case "knn" -> new DotKnnByteBenchmarkFunction(dims); + case "binary" -> new DotBinaryByteBenchmarkFunction(dims); + default -> throw new UnsupportedOperationException("unexpected type [" + type + "]"); + }; + case "cosine" -> benchmarkFunction = switch (type) { + case "knn" -> new CosineKnnByteBenchmarkFunction(dims); + case "binary" -> new CosineBinaryByteBenchmarkFunction(dims); + default -> throw new 
UnsupportedOperationException("unexpected type [" + type + "]"); + }; + case "l1" -> benchmarkFunction = switch (type) { + case "knn" -> new L1KnnByteBenchmarkFunction(dims); + case "binary" -> new L1BinaryByteBenchmarkFunction(dims); + default -> throw new UnsupportedOperationException("unexpected type [" + type + "]"); + }; + case "l2" -> benchmarkFunction = switch (type) { + case "knn" -> new L2KnnByteBenchmarkFunction(dims); + case "binary" -> new L2BinaryByteBenchmarkFunction(dims); + default -> throw new UnsupportedOperationException("unexpected type [" + type + "]"); + }; + default -> throw new UnsupportedOperationException("unexpected function [" + function + "]"); + } + } + default -> throw new UnsupportedOperationException("unexpected element [" + element + "]"); + } + ; + } + + @Benchmark + public void benchmark() throws IOException { + for (int i = 0; i < 25000; ++i) { + benchmarkFunction.execute(Object::toString); + } + } +} diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/PrecommitTaskPlugin.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/PrecommitTaskPlugin.java index f6a5db279792..49148330e02e 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/PrecommitTaskPlugin.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/PrecommitTaskPlugin.java @@ -31,10 +31,10 @@ public void apply(Project project) { "lifecycle-base", p -> project.getTasks().named(LifecycleBasePlugin.CHECK_TASK_NAME).configure(t -> t.dependsOn(precommit)) ); - project.getPluginManager().withPlugin("java", p -> { + project.getPluginManager().withPlugin("java-base", p -> { // run compilation as part of precommit project.getExtensions().getByType(JavaPluginExtension.class).getSourceSets().all(sourceSet -> - precommit.configure(t -> t.shouldRunAfter(sourceSet.getClassesTaskName())) + precommit.configure(t -> t.dependsOn(sourceSet.getClassesTaskName())) ); // make sure tests run after all precommit tasks project.getTasks().withType(Test.class).configureEach(t -> t.mustRunAfter(precommit)); diff --git a/build-tools-internal/build.gradle b/build-tools-internal/build.gradle index 707faf8749c1..3d7edf7ef86f 100644 --- a/build-tools-internal/build.gradle +++ b/build-tools-internal/build.gradle @@ -115,6 +115,10 @@ gradlePlugin { id = 'elasticsearch.java' implementationClass = 'org.elasticsearch.gradle.internal.ElasticsearchJavaPlugin' } + legacyInternalJavaRestTest { + id = 'elasticsearch.legacy-java-rest-test' + implementationClass = 'org.elasticsearch.gradle.internal.test.rest.LegacyJavaRestTestPlugin' + } internalJavaRestTest { id = 'elasticsearch.internal-java-rest-test' implementationClass = 'org.elasticsearch.gradle.internal.test.rest.InternalJavaRestTestPlugin' @@ -167,9 +171,17 @@ gradlePlugin { id = 'elasticsearch.validate-rest-spec' implementationClass = 'org.elasticsearch.gradle.internal.precommit.ValidateRestSpecPlugin' } + legacyYamlRestCompatTest { + id = 'elasticsearch.legacy-yaml-rest-compat-test' + implementationClass = 'org.elasticsearch.gradle.internal.test.rest.compat.compat.LegacyYamlRestCompatTestPlugin' + } yamlRestCompatTest { id = 'elasticsearch.yaml-rest-compat-test' - implementationClass = 'org.elasticsearch.gradle.internal.rest.compat.YamlRestCompatTestPlugin' + implementationClass = 'org.elasticsearch.gradle.internal.test.rest.compat.compat.YamlRestCompatTestPlugin' + } + legacyYamlRestTest { + id = 
'elasticsearch.legacy-yaml-rest-test' + implementationClass = 'org.elasticsearch.gradle.internal.test.rest.LegacyYamlRestTestPlugin' } yamlRestTest { id = 'elasticsearch.internal-yaml-rest-test' @@ -220,7 +232,7 @@ configurations { dependencies { constraints { - integTestImplementation('org.ow2.asm:asm:9.3') + integTestImplementation('org.ow2.asm:asm:9.4') implementation "org.yaml:snakeyaml:${versions.snakeyaml}" } components.all(JacksonAlignmentRule) @@ -324,6 +336,10 @@ tasks.register("bootstrapPerformanceTests", Copy) { branchWrapper:"${-> gradle.gradleVersion}".toString()]) } +tasks.named("jar") { + exclude("classpath.index") +} + def resolveMainWrapperVersion() { new URL("https://raw.githubusercontent.com/elastic/elasticsearch/main/build-tools-internal/src/main/resources/minimumGradleVersion").text.trim() } diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractRestResourcesFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractRestResourcesFuncTest.groovy index 2fa7ed6faaee..2756b9745bc7 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractRestResourcesFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/fixtures/AbstractRestResourcesFuncTest.groovy @@ -13,6 +13,7 @@ abstract class AbstractRestResourcesFuncTest extends AbstractGradleFuncTest { def setup() { subProject(":test:framework") << "apply plugin: 'elasticsearch.java'" + subProject(":test:test-clusters") << "apply plugin: 'elasticsearch.java'" subProject(":test:yaml-rest-runner") << "apply plugin: 'elasticsearch.java'" subProject(":rest-api-spec") << """ diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/TestingConventionsPrecommitPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/TestingConventionsPrecommitPluginFuncTest.groovy index 017b7333f8de..31ecffc07f63 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/TestingConventionsPrecommitPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/TestingConventionsPrecommitPluginFuncTest.groovy @@ -174,12 +174,12 @@ class TestingConventionsPrecommitPluginFuncTest extends AbstractGradleInternalPl given: clazz(dir('src/yamlRestTest/java'), "org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase") buildFile << """ - apply plugin:'elasticsearch.internal-yaml-rest-test' - + apply plugin:'elasticsearch.legacy-yaml-rest-test' + dependencies { yamlRestTestImplementation "org.apache.lucene:tests.util:1.0" yamlRestTestImplementation "org.junit:junit:4.42" - } + } """ clazz(dir("src/yamlRestTest/java"), "org.acme.valid.SomeMatchingIT", "org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase") { @@ -216,11 +216,11 @@ class TestingConventionsPrecommitPluginFuncTest extends AbstractGradleInternalPl buildFile << """ import org.elasticsearch.gradle.internal.precommit.TestingConventionsCheckTask apply plugin:'$pluginName' - + dependencies { ${sourceSetName}Implementation "org.apache.lucene:tests.util:1.0" ${sourceSetName}Implementation "org.junit:junit:4.42" - } + } tasks.withType(TestingConventionsCheckTask).configureEach { suffix 'IT' suffix 'Tests' @@ -252,19 +252,19 @@ class TestingConventionsPrecommitPluginFuncTest extends AbstractGradleInternalPl ) where: - pluginName | taskName | sourceSetName - 
"elasticsearch.internal-java-rest-test" | ":javaRestTestTestingConventions" | "javaRestTest" + pluginName | taskName | sourceSetName + "elasticsearch.legacy-java-rest-test" | ":javaRestTestTestingConventions" | "javaRestTest" "elasticsearch.internal-cluster-test" | ":internalClusterTestTestingConventions" | "internalClusterTest" } private void simpleJavaBuild() { buildFile << """ apply plugin:'java' - + dependencies { testImplementation "org.apache.lucene:tests.util:1.0" testImplementation "org.junit:junit:4.42" - } + } """ } } diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/InternalYamlRestTestPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/InternalYamlRestTestPluginFuncTest.groovy deleted file mode 100644 index e70be2b4de4d..000000000000 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/InternalYamlRestTestPluginFuncTest.groovy +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.gradle.internal.test.rest - -import spock.lang.IgnoreIf - -import org.elasticsearch.gradle.VersionProperties -import org.elasticsearch.gradle.fixtures.AbstractRestResourcesFuncTest -import org.gradle.testkit.runner.TaskOutcome - -@IgnoreIf({ os.isWindows() }) -class InternalYamlRestTestPluginFuncTest extends AbstractRestResourcesFuncTest { - - def "yamlRestTest does nothing when there are no tests"() { - given: - // RestIntegTestTask not cc compatible due to - configurationCacheCompatible = false - buildFile << """ - plugins { - id 'elasticsearch.internal-yaml-rest-test' - } - """ - - when: - def result = gradleRunner("yamlRestTest").build() - - then: - result.task(':yamlRestTest').outcome == TaskOutcome.NO_SOURCE - result.task(':copyYamlTestsTask').outcome == TaskOutcome.NO_SOURCE - result.task(':copyRestApiSpecsTask').outcome == TaskOutcome.NO_SOURCE - } - - def "yamlRestTest executes and copies api and tests to correct source set"() { - given: - // RestIntegTestTask not cc compatible due to - configurationCacheCompatible = false - internalBuild() - buildFile << """ - apply plugin: 'elasticsearch.internal-yaml-rest-test' - - dependencies { - yamlRestTestImplementation "junit:junit:4.12" - } - - // can't actually spin up test cluster from this test - tasks.withType(Test).configureEach{ enabled = false } - - tasks.register("printYamlRestTestClasspath").configure { - doLast { - println sourceSets.yamlRestTest.runtimeClasspath.asPath - } - } - """ - String api = "foo.json" - setupRestResources([api]) - addRestTestsToProject(["10_basic.yml"], "yamlRestTest") - file("src/yamlRestTest/java/MockIT.java") << "import org.junit.Test;class MockIT { @Test public void doNothing() { }}" - - when: - def result = gradleRunner("yamlRestTest", "printYamlRestTestClasspath").build() - - then: - result.task(':yamlRestTest').outcome == TaskOutcome.SKIPPED - result.task(':copyRestApiSpecsTask').outcome == TaskOutcome.SUCCESS - result.task(':copyYamlTestsTask').outcome == TaskOutcome.NO_SOURCE - - file("/build/restResources/yamlSpecs/rest-api-spec/api/" + api).exists() - 
file("/build/resources/yamlRestTest/rest-api-spec/test/10_basic.yml").exists() - file("/build/classes/java/yamlRestTest/MockIT.class").exists() - - // check that our copied specs and tests are on the yamlRestTest classpath - result.output.contains("./build/restResources/yamlSpecs") - result.output.contains("./build/restResources/yamlTests") - - when: - result = gradleRunner("yamlRestTest").build() - - then: - result.task(':yamlRestTest').outcome == TaskOutcome.SKIPPED - result.task(':copyRestApiSpecsTask').outcome == TaskOutcome.UP_TO_DATE - result.task(':copyYamlTestsTask').outcome == TaskOutcome.NO_SOURCE - } - - def "#type projects are wired into test cluster setup"() { - given: - internalBuild() - localDistroSetup() - def distroVersion = VersionProperties.getElasticsearch() - - def subProjectBuildFile = subProject(pluginProjectPath) - subProjectBuildFile << """ - apply plugin: 'elasticsearch.esplugin' - apply plugin: 'elasticsearch.internal-yaml-rest-test' - - dependencies { - yamlRestTestImplementation "junit:junit:4.12" - } - - esplugin { - description = 'test plugin' - classname = 'com.acme.plugin.TestPlugin' - } - - // for testing purposes only - configurations.compileOnly.dependencies.clear() - - testClusters { - yamlRestTest { - version = "$distroVersion" - testDistribution = 'INTEG_TEST' - } - } - """ - def testFile = new File(subProjectBuildFile.parentFile, 'src/yamlRestTest/java/org/acme/SomeTestIT.java') - testFile.parentFile.mkdirs() - testFile << """ - package org.acme; - - import org.junit.Test; - - public class SomeTestIT { - @Test - public void someMethod() { - } - } - """ - - when: - def result = gradleRunner("yamlRestTest", "--console", 'plain', '--stacktrace').buildAndFail() - - then: - result.task(":distribution:archives:integ-test-zip:buildExpanded").outcome == TaskOutcome.SUCCESS - result.getOutput().contains(expectedInstallLog) - - where: - type | pluginProjectPath | expectedInstallLog - "plugin" | ":plugins:plugin-a" | "installing 1 plugins in a single transaction" - "module" | ":modules:module-a" | "Installing 1 modules" - } - - private void localDistroSetup() { - settingsFile << """ - include ":distribution:archives:integ-test-zip" - """ - def distProjectFolder = file("distribution/archives/integ-test-zip") - file(distProjectFolder, 'current-marker.txt') << "current" - - def elasticPluginScript = file(distProjectFolder, 'src/bin/elasticsearch-plugin') - elasticPluginScript << """#!/bin/bash -@echo off -echo "Installing plugin \$0" -""" - assert elasticPluginScript.setExecutable(true) - - def elasticKeystoreScript = file(distProjectFolder, 'src/bin/elasticsearch-keystore') - elasticKeystoreScript << """#!/bin/bash -@echo off -echo "Installing keystore \$0" -""" - assert elasticKeystoreScript.setExecutable(true) - - def elasticScript = file(distProjectFolder, 'src/bin/elasticsearch') - elasticScript << """#!/bin/bash -@echo off -echo "Running elasticsearch \$0" -""" - assert elasticScript.setExecutable(true) - - file(distProjectFolder, 'src/config/elasticsearch.properties') << "some propes" - file(distProjectFolder, 'src/config/jvm.options') << """ --Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m --XX:ErrorFile=logs/hs_err_pid%p.log --XX:HeapDumpPath=data -""" - file(distProjectFolder, 'build.gradle') << """ - import org.gradle.api.internal.artifacts.ArtifactAttributes; - - apply plugin:'distribution' - def buildExpanded = tasks.register("buildExpanded", Copy) { - into("build/local") - - into('es-dummy-dist') { - 
from('src') - from('current-marker.txt') - } - } - - configurations { - extracted { - attributes { - attribute(ArtifactAttributes.ARTIFACT_FORMAT, "directory") - } - } - } - artifacts { - it.add("extracted", buildExpanded) - } - """ - } -} diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy new file mode 100644 index 000000000000..cf0cab3b952f --- /dev/null +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestCompatTestPluginFuncTest.groovy @@ -0,0 +1,412 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.test.rest + +import com.fasterxml.jackson.databind.ObjectMapper +import com.fasterxml.jackson.databind.SequenceWriter +import com.fasterxml.jackson.databind.node.ObjectNode +import com.fasterxml.jackson.dataformat.yaml.YAMLFactory +import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.fixtures.AbstractRestResourcesFuncTest +import org.elasticsearch.gradle.VersionProperties +import org.gradle.testkit.runner.TaskOutcome + +class LegacyYamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { + + def compatibleVersion = Version.fromString(VersionProperties.getVersions().get("elasticsearch")).getMajor() - 1 + def specIntermediateDir = "restResources/v${compatibleVersion}/yamlSpecs" + def testIntermediateDir = "restResources/v${compatibleVersion}/yamlTests" + def transformTask = ":yamlRestTestV${compatibleVersion}CompatTransform" + def YAML_FACTORY = new YAMLFactory() + def MAPPER = new ObjectMapper(YAML_FACTORY) + def READER = MAPPER.readerFor(ObjectNode.class) + def WRITER = MAPPER.writerFor(ObjectNode.class) + + def setup() { + // not cc compatible due to: + // 1. TestClustersPlugin not cc compatible due to listener registration + // 2. 
RestIntegTestTask not cc compatible due to + configurationCacheCompatible = false + } + def "yamlRestTestVxCompatTest does nothing when there are no tests"() { + given: + subProject(":distribution:bwc:maintenance") << """ + configurations { checkout } + artifacts { + checkout(new File(projectDir, "checkoutDir")) + } + """ + + buildFile << """ + plugins { + id 'elasticsearch.legacy-yaml-rest-compat-test' + } + """ + + when: + def result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest", '--stacktrace').build() + + then: + result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.NO_SOURCE + result.task(':copyRestCompatApiTask').outcome == TaskOutcome.NO_SOURCE + result.task(':copyRestCompatTestTask').outcome == TaskOutcome.NO_SOURCE + result.task(transformTask).outcome == TaskOutcome.NO_SOURCE + } + + def "yamlRestTestVxCompatTest executes and copies api and transforms tests from :bwc:maintenance"() { + given: + internalBuild() + + subProject(":distribution:bwc:maintenance") << """ + configurations { checkout } + artifacts { + checkout(new File(projectDir, "checkoutDir")) + } + """ + + buildFile << """ + apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' + + // avoids a dependency problem in this test, the distribution in use here is inconsequential to the test + import org.elasticsearch.gradle.testclusters.TestDistribution; + + dependencies { + yamlRestTestImplementation "junit:junit:4.12" + } + + // can't actually spin up test cluster from this test + tasks.withType(Test).configureEach{ enabled = false } + """ + + String wrongApi = "wrong_version.json" + String wrongTest = "wrong_version.yml" + String additionalTest = "additional_test.yml" + setupRestResources([wrongApi], [wrongTest]) //setups up resources for current version, which should not be used for this test + String sourceSetName = "yamlRestTestV" + compatibleVersion + "Compat" + addRestTestsToProject([additionalTest], sourceSetName) + //intentionally adding to yamlRestTest source set since the .classes are copied from there + file("src/yamlRestTest/java/MockIT.java") << "import org.junit.Test;class MockIT { @Test public void doNothing() { }}" + + String api = "foo.json" + String test = "10_basic.yml" + //add the compatible test and api files, these are the prior version's normal yaml rest tests + file("distribution/bwc/maintenance/checkoutDir/rest-api-spec/src/main/resources/rest-api-spec/api/" + api) << "" + file("distribution/bwc/maintenance/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/" + test) << "" + + when: + def result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest").build() + + then: + result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.SKIPPED + result.task(':copyRestCompatApiTask').outcome == TaskOutcome.SUCCESS + result.task(':copyRestCompatTestTask').outcome == TaskOutcome.SUCCESS + result.task(transformTask).outcome == TaskOutcome.SUCCESS + + file("/build/${specIntermediateDir}/rest-api-spec/api/" + api).exists() + file("/build/${testIntermediateDir}/original/rest-api-spec/test/" + test).exists() + file("/build/${testIntermediateDir}/transformed/rest-api-spec/test/" + test).exists() + file("/build/${testIntermediateDir}/original/rest-api-spec/test/" + test).exists() + file("/build/${testIntermediateDir}/transformed/rest-api-spec/test/" + test).exists() + file("/build/${testIntermediateDir}/transformed/rest-api-spec/test/" + test).text.contains("headers") //transformation adds this + 
file("/build/resources/${sourceSetName}/rest-api-spec/test/" + additionalTest).exists() + + //additionalTest is not copied from the prior version, and thus not in the intermediate directory, nor transformed + file("/build/resources/${sourceSetName}/" + testIntermediateDir + "/rest-api-spec/test/" + additionalTest).exists() == false + file("/build/resources/${sourceSetName}/rest-api-spec/test/" + additionalTest).text.contains("headers") == false + + file("/build/classes/java/yamlRestTest/MockIT.class").exists() //The "standard" runner is used to execute the compat test + + file("/build/resources/${sourceSetName}/rest-api-spec/api/" + wrongApi).exists() == false + file("/build/resources/${sourceSetName}/" + testIntermediateDir + "/rest-api-spec/test/" + wrongTest).exists() == false + file("/build/resources/${sourceSetName}/rest-api-spec/test/" + wrongTest).exists() == false + + result.task(':copyRestApiSpecsTask').outcome == TaskOutcome.NO_SOURCE + result.task(':copyYamlTestsTask').outcome == TaskOutcome.NO_SOURCE + + when: + result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest").build() + + then: + result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.SKIPPED + result.task(':copyRestCompatApiTask').outcome == TaskOutcome.UP_TO_DATE + result.task(':copyRestCompatTestTask').outcome == TaskOutcome.UP_TO_DATE + result.task(transformTask).outcome == TaskOutcome.UP_TO_DATE + } + + def "yamlRestTestVxCompatTest is wired into check and checkRestCompat"() { + given: + withVersionCatalogue() + subProject(":distribution:bwc:maintenance") << """ + configurations { checkout } + artifacts { + checkout(new File(projectDir, "checkoutDir")) + } + """ + + buildFile << """ + plugins { + id 'elasticsearch.legacy-yaml-rest-compat-test' + } + + """ + + when: + def result = gradleRunner("check").build() + + then: + result.task(':check').outcome == TaskOutcome.UP_TO_DATE + result.task(':checkRestCompat').outcome == TaskOutcome.UP_TO_DATE + result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.NO_SOURCE + result.task(':copyRestCompatApiTask').outcome == TaskOutcome.NO_SOURCE + result.task(':copyRestCompatTestTask').outcome == TaskOutcome.NO_SOURCE + result.task(transformTask).outcome == TaskOutcome.NO_SOURCE + + when: + buildFile << """ + ext.bwc_tests_enabled = false + """ + result = gradleRunner("check").build() + + then: + result.task(':check').outcome == TaskOutcome.UP_TO_DATE + result.task(':checkRestCompat').outcome == TaskOutcome.UP_TO_DATE + result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.SKIPPED + result.task(':copyRestCompatApiTask').outcome == TaskOutcome.SKIPPED + result.task(':copyRestCompatTestTask').outcome == TaskOutcome.SKIPPED + result.task(transformTask).outcome == TaskOutcome.SKIPPED + } + + def "transform task executes and works as configured"() { + given: + internalBuild() + + subProject(":distribution:bwc:maintenance") << """ + configurations { checkout } + artifacts { + checkout(new File(projectDir, "checkoutDir")) + } + """ + + buildFile << """ + apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' + + // avoids a dependency problem in this test, the distribution in use here is inconsequential to the test + import org.elasticsearch.gradle.testclusters.TestDistribution; + + dependencies { + yamlRestTestImplementation "junit:junit:4.12" + } + tasks.named("yamlRestTestV${compatibleVersion}CompatTransform").configure({ task -> + task.skipTest("test/test/two", "This is a test to skip 
test two") + task.replaceValueInMatch("_type", "_doc") + task.replaceValueInMatch("_source.values", ["z", "x", "y"], "one") + task.removeMatch("_source.blah") + task.removeMatch("_source.junk", "two") + task.addMatch("_source.added", [name: 'jake', likes: 'cheese'], "one") + task.addWarning("one", "warning1", "warning2") + task.addWarningRegex("two", "regex warning here .* [a-z]") + task.addAllowedWarning("added allowed warning") + task.addAllowedWarningRegex("added allowed warning regex .* [0-9]") + task.removeWarning("one", "warning to remove") + task.replaceIsTrue("value_to_replace", "replaced_value") + task.replaceIsFalse("value_to_replace", "replaced_value") + task.replaceKeyInDo("do_.some.key_to_replace", "do_.some.key_that_was_replaced") + task.replaceKeyInDo("do_.some.key_to_replace_in_two", "do_.some.key_that_was_replaced_in_two", "two") + task.replaceKeyInMatch("match_.some.key_to_replace", "match_.some.key_that_was_replaced") + task.replaceKeyInLength("key.in_length_to_replace", "key.in_length_that_was_replaced") + task.replaceValueInLength("value_to_replace", 99, "one") + task.replaceValueTextByKeyValue("keyvalue", "toreplace", "replacedkeyvalue") + task.replaceValueTextByKeyValue("index", "test", "test2", "two") + }) + // can't actually spin up test cluster from this test + tasks.withType(Test).configureEach{ enabled = false } + """ + + setupRestResources([], []) + + file("distribution/bwc/maintenance/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/test.yml" ) << """ + "one": + - do: + do_.some.key_to_replace: + index: test + id: 1 + keyvalue : toreplace + do_.some.key_to_replace_in_two: + no_change_here: "because it's not in test 'two'" + warnings: + - "warning to remove" + - match: { _source.values: ["foo"] } + - match: { _type: "_foo" } + - match: { _source.blah: 1234 } + - match: { _source.junk: true } + - match: { match_.some.key_to_replace: true } + - is_true: "value_to_replace" + - is_false: "value_to_replace" + - is_true: "value_not_to_replace" + - is_false: "value_not_to_replace" + - length: { key.in_length_to_replace: 1 } + - length: { value_to_replace: 1 } + --- + "two": + - do: + get: + index: test + id: 1 + do_.some.key_to_replace_in_two: + changed_here: "because it is in test 'two'" + - match: { _source.values: ["foo"] } + - match: { _type: "_foo" } + - match: { _source.blah: 1234 } + - match: { _source.junk: true } + - is_true: "value_to_replace" + - is_false: "value_to_replace" + - is_true: "value_not_to_replace" + - is_false: "value_not_to_replace" + - length: { value_not_to_replace: 1 } + --- + "use cat with no header": + - do: + cat.indices: + {} + - match: {} + """.stripIndent() + when: + def result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest").build() + + then: + + result.task(transformTask).outcome == TaskOutcome.SUCCESS + + + file("/build/${testIntermediateDir}/transformed/rest-api-spec/test/test.yml" ).exists() + List actual = READER.readValues(file("/build/${testIntermediateDir}/transformed/rest-api-spec/test/test.yml")).readAll() + List expectedAll = READER.readValues( + """ + --- + setup: + - skip: + features: + - "headers" + - "warnings" + - "warnings_regex" + - "allowed_warnings" + - "allowed_warnings_regex" + --- + one: + - do: + do_.some.key_that_was_replaced: + index: "test" + id: 1 + keyvalue : replacedkeyvalue + do_.some.key_to_replace_in_two: + no_change_here: "because it's not in test 'two'" + warnings: + - "warning1" + - "warning2" + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + 
Accept: "application/vnd.elasticsearch+json;compatible-with=7" + allowed_warnings: + - "added allowed warning" + allowed_warnings_regex: + - "added allowed warning regex .* [0-9]" + - match: + _source.values: + - "z" + - "x" + - "y" + - match: + _type: "_doc" + - match: {} + - match: + _source.junk: true + - match: + match_.some.key_that_was_replaced: true + - is_true: "replaced_value" + - is_false: "replaced_value" + - is_true: "value_not_to_replace" + - is_false: "value_not_to_replace" + - length: { key.in_length_that_was_replaced: 1 } + - length: { value_to_replace: 99 } + - match: + _source.added: + name: "jake" + likes: "cheese" + + --- + two: + - skip: + version: "all" + reason: "This is a test to skip test two" + - do: + get: + index: "test2" + id: 1 + do_.some.key_that_was_replaced_in_two: + changed_here: "because it is in test 'two'" + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + warnings_regex: + - "regex warning here .* [a-z]" + allowed_warnings: + - "added allowed warning" + allowed_warnings_regex: + - "added allowed warning regex .* [0-9]" + - match: + _source.values: + - "foo" + - match: + _type: "_doc" + - match: {} + - match: {} + - is_true: "replaced_value" + - is_false: "replaced_value" + - is_true: "value_not_to_replace" + - is_false: "value_not_to_replace" + - length: { value_not_to_replace: 1 } + --- + "use cat with no header": + - do: + cat.indices: + {} + allowed_warnings: + - "added allowed warning" + allowed_warnings_regex: + - "added allowed warning regex .* [0-9]" + - match: {} + """.stripIndent()).readAll() + + expectedAll.eachWithIndex{ ObjectNode expected, int i -> + if(expected != actual.get(i)) { + println("\nTransformed Test:") + SequenceWriter sequenceWriter = WRITER.writeValues(System.out) + for (ObjectNode transformedTest : actual) { + sequenceWriter.write(transformedTest) + } + sequenceWriter.close() + } + assert expected == actual.get(i) + } + + when: + result = gradleRunner(transformTask).build() + + then: + result.task(transformTask).outcome == TaskOutcome.UP_TO_DATE + + when: + buildFile.write(buildFile.text.replace("blah", "baz")) + result = gradleRunner(transformTask).build() + + then: + result.task(transformTask).outcome == TaskOutcome.SUCCESS + } + +} diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy new file mode 100644 index 000000000000..599ad6d2df86 --- /dev/null +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPluginFuncTest.groovy @@ -0,0 +1,206 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle.internal.test.rest + +import spock.lang.IgnoreIf + +import org.elasticsearch.gradle.VersionProperties +import org.elasticsearch.gradle.fixtures.AbstractRestResourcesFuncTest +import org.gradle.testkit.runner.TaskOutcome + +@IgnoreIf({ os.isWindows() }) +class LegacyYamlRestTestPluginFuncTest extends AbstractRestResourcesFuncTest { + + def "yamlRestTest does nothing when there are no tests"() { + given: + // RestIntegTestTask not cc compatible due to + configurationCacheCompatible = false + buildFile << """ + plugins { + id 'elasticsearch.legacy-yaml-rest-test' + } + """ + + when: + def result = gradleRunner("yamlRestTest").build() + + then: + result.task(':yamlRestTest').outcome == TaskOutcome.NO_SOURCE + result.task(':copyYamlTestsTask').outcome == TaskOutcome.NO_SOURCE + result.task(':copyRestApiSpecsTask').outcome == TaskOutcome.NO_SOURCE + } + + def "yamlRestTest executes and copies api and tests to correct source set"() { + given: + // RestIntegTestTask not cc compatible due to + configurationCacheCompatible = false + internalBuild() + buildFile << """ + apply plugin: 'elasticsearch.legacy-yaml-rest-test' + + dependencies { + yamlRestTestImplementation "junit:junit:4.12" + } + + // can't actually spin up test cluster from this test + tasks.withType(Test).configureEach{ enabled = false } + + tasks.register("printYamlRestTestClasspath").configure { + doLast { + println sourceSets.yamlRestTest.runtimeClasspath.asPath + } + } + """ + String api = "foo.json" + setupRestResources([api]) + addRestTestsToProject(["10_basic.yml"], "yamlRestTest") + file("src/yamlRestTest/java/MockIT.java") << "import org.junit.Test;class MockIT { @Test public void doNothing() { }}" + + when: + def result = gradleRunner("yamlRestTest", "printYamlRestTestClasspath").build() + + then: + result.task(':yamlRestTest').outcome == TaskOutcome.SKIPPED + result.task(':copyRestApiSpecsTask').outcome == TaskOutcome.SUCCESS + result.task(':copyYamlTestsTask').outcome == TaskOutcome.NO_SOURCE + + file("/build/restResources/yamlSpecs/rest-api-spec/api/" + api).exists() + file("/build/resources/yamlRestTest/rest-api-spec/test/10_basic.yml").exists() + file("/build/classes/java/yamlRestTest/MockIT.class").exists() + + // check that our copied specs and tests are on the yamlRestTest classpath + result.output.contains("./build/restResources/yamlSpecs") + result.output.contains("./build/restResources/yamlTests") + + when: + result = gradleRunner("yamlRestTest").build() + + then: + result.task(':yamlRestTest').outcome == TaskOutcome.SKIPPED + result.task(':copyRestApiSpecsTask').outcome == TaskOutcome.UP_TO_DATE + result.task(':copyYamlTestsTask').outcome == TaskOutcome.NO_SOURCE + } + + def "#type projects are wired into test cluster setup"() { + given: + internalBuild() + localDistroSetup() + def distroVersion = VersionProperties.getElasticsearch() + + def subProjectBuildFile = subProject(pluginProjectPath) + subProjectBuildFile << """ + apply plugin: 'elasticsearch.esplugin' + apply plugin: 'elasticsearch.legacy-yaml-rest-test' + + dependencies { + yamlRestTestImplementation "junit:junit:4.12" + } + + esplugin { + description = 'test plugin' + classname = 'com.acme.plugin.TestPlugin' + } + + // for testing purposes only + configurations.compileOnly.dependencies.clear() + + testClusters { + yamlRestTest { + version = "$distroVersion" + testDistribution = 'INTEG_TEST' + } + } + """ + def testFile = new File(subProjectBuildFile.parentFile, 'src/yamlRestTest/java/org/acme/SomeTestIT.java') + 
testFile.parentFile.mkdirs() + testFile << """ + package org.acme; + + import org.junit.Test; + + public class SomeTestIT { + @Test + public void someMethod() { + } + } + """ + + when: + def result = gradleRunner("yamlRestTest", "--console", 'plain', '--stacktrace').buildAndFail() + + then: + result.task(":distribution:archives:integ-test-zip:buildExpanded").outcome == TaskOutcome.SUCCESS + result.getOutput().contains(expectedInstallLog) + + where: + type | pluginProjectPath | expectedInstallLog + "plugin" | ":plugins:plugin-a" | "installing 1 plugins in a single transaction" + "module" | ":modules:module-a" | "Installing 1 modules" + } + + private void localDistroSetup() { + settingsFile << """ + include ":distribution:archives:integ-test-zip" + """ + def distProjectFolder = file("distribution/archives/integ-test-zip") + file(distProjectFolder, 'current-marker.txt') << "current" + + def elasticPluginScript = file(distProjectFolder, 'src/bin/elasticsearch-plugin') + elasticPluginScript << """#!/bin/bash +@echo off +echo "Installing plugin \$0" +""" + assert elasticPluginScript.setExecutable(true) + + def elasticKeystoreScript = file(distProjectFolder, 'src/bin/elasticsearch-keystore') + elasticKeystoreScript << """#!/bin/bash +@echo off +echo "Installing keystore \$0" +""" + assert elasticKeystoreScript.setExecutable(true) + + def elasticScript = file(distProjectFolder, 'src/bin/elasticsearch') + elasticScript << """#!/bin/bash +@echo off +echo "Running elasticsearch \$0" +""" + assert elasticScript.setExecutable(true) + + file(distProjectFolder, 'src/config/elasticsearch.properties') << "some propes" + file(distProjectFolder, 'src/config/jvm.options') << """ +-Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,level,pid,tags:filecount=32,filesize=64m +-XX:ErrorFile=logs/hs_err_pid%p.log +-XX:HeapDumpPath=data +""" + file(distProjectFolder, 'build.gradle') << """ + import org.gradle.api.internal.artifacts.ArtifactAttributes; + + apply plugin:'distribution' + def buildExpanded = tasks.register("buildExpanded", Copy) { + into("build/local") + + into('es-dummy-dist') { + from('src') + from('current-marker.txt') + } + } + + configurations { + extracted { + attributes { + attribute(ArtifactAttributes.ARTIFACT_FORMAT, "directory") + } + } + } + artifacts { + it.add("extracted", buildExpanded) + } + """ + } +} diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/YamlRestCompatTestPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/YamlRestCompatTestPluginFuncTest.groovy deleted file mode 100644 index 58f894489ab7..000000000000 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/YamlRestCompatTestPluginFuncTest.groovy +++ /dev/null @@ -1,412 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.gradle.internal.test.rest - -import com.fasterxml.jackson.databind.ObjectMapper -import com.fasterxml.jackson.databind.SequenceWriter -import com.fasterxml.jackson.databind.node.ObjectNode -import com.fasterxml.jackson.dataformat.yaml.YAMLFactory -import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.fixtures.AbstractRestResourcesFuncTest -import org.elasticsearch.gradle.VersionProperties -import org.gradle.testkit.runner.TaskOutcome - -class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { - - def compatibleVersion = Version.fromString(VersionProperties.getVersions().get("elasticsearch")).getMajor() - 1 - def specIntermediateDir = "restResources/v${compatibleVersion}/yamlSpecs" - def testIntermediateDir = "restResources/v${compatibleVersion}/yamlTests" - def transformTask = ":yamlRestTestV${compatibleVersion}CompatTransform" - def YAML_FACTORY = new YAMLFactory() - def MAPPER = new ObjectMapper(YAML_FACTORY) - def READER = MAPPER.readerFor(ObjectNode.class) - def WRITER = MAPPER.writerFor(ObjectNode.class) - - def setup() { - // not cc compatible due to: - // 1. TestClustersPlugin not cc compatible due to listener registration - // 2. RestIntegTestTask not cc compatible due to - configurationCacheCompatible = false - } - def "yamlRestTestVxCompatTest does nothing when there are no tests"() { - given: - subProject(":distribution:bwc:maintenance") << """ - configurations { checkout } - artifacts { - checkout(new File(projectDir, "checkoutDir")) - } - """ - - buildFile << """ - plugins { - id 'elasticsearch.yaml-rest-compat-test' - } - """ - - when: - def result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest", '--stacktrace').build() - - then: - result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.NO_SOURCE - result.task(':copyRestCompatApiTask').outcome == TaskOutcome.NO_SOURCE - result.task(':copyRestCompatTestTask').outcome == TaskOutcome.NO_SOURCE - result.task(transformTask).outcome == TaskOutcome.NO_SOURCE - } - - def "yamlRestTestVxCompatTest executes and copies api and transforms tests from :bwc:maintenance"() { - given: - internalBuild() - - subProject(":distribution:bwc:maintenance") << """ - configurations { checkout } - artifacts { - checkout(new File(projectDir, "checkoutDir")) - } - """ - - buildFile << """ - apply plugin: 'elasticsearch.yaml-rest-compat-test' - - // avoids a dependency problem in this test, the distribution in use here is inconsequential to the test - import org.elasticsearch.gradle.testclusters.TestDistribution; - - dependencies { - yamlRestTestImplementation "junit:junit:4.12" - } - - // can't actually spin up test cluster from this test - tasks.withType(Test).configureEach{ enabled = false } - """ - - String wrongApi = "wrong_version.json" - String wrongTest = "wrong_version.yml" - String additionalTest = "additional_test.yml" - setupRestResources([wrongApi], [wrongTest]) //setups up resources for current version, which should not be used for this test - String sourceSetName = "yamlRestTestV" + compatibleVersion + "Compat" - addRestTestsToProject([additionalTest], sourceSetName) - //intentionally adding to yamlRestTest source set since the .classes are copied from there - file("src/yamlRestTest/java/MockIT.java") << "import org.junit.Test;class MockIT { @Test public void doNothing() { }}" - - String api = "foo.json" - String test = "10_basic.yml" - //add the compatible test and api files, these are the prior version's normal yaml rest 
tests - file("distribution/bwc/maintenance/checkoutDir/rest-api-spec/src/main/resources/rest-api-spec/api/" + api) << "" - file("distribution/bwc/maintenance/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/" + test) << "" - - when: - def result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest").build() - - then: - result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.SKIPPED - result.task(':copyRestCompatApiTask').outcome == TaskOutcome.SUCCESS - result.task(':copyRestCompatTestTask').outcome == TaskOutcome.SUCCESS - result.task(transformTask).outcome == TaskOutcome.SUCCESS - - file("/build/${specIntermediateDir}/rest-api-spec/api/" + api).exists() - file("/build/${testIntermediateDir}/original/rest-api-spec/test/" + test).exists() - file("/build/${testIntermediateDir}/transformed/rest-api-spec/test/" + test).exists() - file("/build/${testIntermediateDir}/original/rest-api-spec/test/" + test).exists() - file("/build/${testIntermediateDir}/transformed/rest-api-spec/test/" + test).exists() - file("/build/${testIntermediateDir}/transformed/rest-api-spec/test/" + test).text.contains("headers") //transformation adds this - file("/build/resources/${sourceSetName}/rest-api-spec/test/" + additionalTest).exists() - - //additionalTest is not copied from the prior version, and thus not in the intermediate directory, nor transformed - file("/build/resources/${sourceSetName}/" + testIntermediateDir + "/rest-api-spec/test/" + additionalTest).exists() == false - file("/build/resources/${sourceSetName}/rest-api-spec/test/" + additionalTest).text.contains("headers") == false - - file("/build/classes/java/yamlRestTest/MockIT.class").exists() //The "standard" runner is used to execute the compat test - - file("/build/resources/${sourceSetName}/rest-api-spec/api/" + wrongApi).exists() == false - file("/build/resources/${sourceSetName}/" + testIntermediateDir + "/rest-api-spec/test/" + wrongTest).exists() == false - file("/build/resources/${sourceSetName}/rest-api-spec/test/" + wrongTest).exists() == false - - result.task(':copyRestApiSpecsTask').outcome == TaskOutcome.NO_SOURCE - result.task(':copyYamlTestsTask').outcome == TaskOutcome.NO_SOURCE - - when: - result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest").build() - - then: - result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.SKIPPED - result.task(':copyRestCompatApiTask').outcome == TaskOutcome.UP_TO_DATE - result.task(':copyRestCompatTestTask').outcome == TaskOutcome.UP_TO_DATE - result.task(transformTask).outcome == TaskOutcome.UP_TO_DATE - } - - def "yamlRestTestVxCompatTest is wired into check and checkRestCompat"() { - given: - withVersionCatalogue() - subProject(":distribution:bwc:maintenance") << """ - configurations { checkout } - artifacts { - checkout(new File(projectDir, "checkoutDir")) - } - """ - - buildFile << """ - plugins { - id 'elasticsearch.yaml-rest-compat-test' - } - - """ - - when: - def result = gradleRunner("check").build() - - then: - result.task(':check').outcome == TaskOutcome.UP_TO_DATE - result.task(':checkRestCompat').outcome == TaskOutcome.UP_TO_DATE - result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.NO_SOURCE - result.task(':copyRestCompatApiTask').outcome == TaskOutcome.NO_SOURCE - result.task(':copyRestCompatTestTask').outcome == TaskOutcome.NO_SOURCE - result.task(transformTask).outcome == TaskOutcome.NO_SOURCE - - when: - buildFile << """ - ext.bwc_tests_enabled = false - """ - result = 
gradleRunner("check").build() - - then: - result.task(':check').outcome == TaskOutcome.UP_TO_DATE - result.task(':checkRestCompat').outcome == TaskOutcome.UP_TO_DATE - result.task(":yamlRestTestV${compatibleVersion}CompatTest").outcome == TaskOutcome.SKIPPED - result.task(':copyRestCompatApiTask').outcome == TaskOutcome.SKIPPED - result.task(':copyRestCompatTestTask').outcome == TaskOutcome.SKIPPED - result.task(transformTask).outcome == TaskOutcome.SKIPPED - } - - def "transform task executes and works as configured"() { - given: - internalBuild() - - subProject(":distribution:bwc:maintenance") << """ - configurations { checkout } - artifacts { - checkout(new File(projectDir, "checkoutDir")) - } - """ - - buildFile << """ - apply plugin: 'elasticsearch.yaml-rest-compat-test' - - // avoids a dependency problem in this test, the distribution in use here is inconsequential to the test - import org.elasticsearch.gradle.testclusters.TestDistribution; - - dependencies { - yamlRestTestImplementation "junit:junit:4.12" - } - tasks.named("yamlRestTestV${compatibleVersion}CompatTransform").configure({ task -> - task.skipTest("test/test/two", "This is a test to skip test two") - task.replaceValueInMatch("_type", "_doc") - task.replaceValueInMatch("_source.values", ["z", "x", "y"], "one") - task.removeMatch("_source.blah") - task.removeMatch("_source.junk", "two") - task.addMatch("_source.added", [name: 'jake', likes: 'cheese'], "one") - task.addWarning("one", "warning1", "warning2") - task.addWarningRegex("two", "regex warning here .* [a-z]") - task.addAllowedWarning("added allowed warning") - task.addAllowedWarningRegex("added allowed warning regex .* [0-9]") - task.removeWarning("one", "warning to remove") - task.replaceIsTrue("value_to_replace", "replaced_value") - task.replaceIsFalse("value_to_replace", "replaced_value") - task.replaceKeyInDo("do_.some.key_to_replace", "do_.some.key_that_was_replaced") - task.replaceKeyInDo("do_.some.key_to_replace_in_two", "do_.some.key_that_was_replaced_in_two", "two") - task.replaceKeyInMatch("match_.some.key_to_replace", "match_.some.key_that_was_replaced") - task.replaceKeyInLength("key.in_length_to_replace", "key.in_length_that_was_replaced") - task.replaceValueInLength("value_to_replace", 99, "one") - task.replaceValueTextByKeyValue("keyvalue", "toreplace", "replacedkeyvalue") - task.replaceValueTextByKeyValue("index", "test", "test2", "two") - }) - // can't actually spin up test cluster from this test - tasks.withType(Test).configureEach{ enabled = false } - """ - - setupRestResources([], []) - - file("distribution/bwc/maintenance/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/test.yml" ) << """ - "one": - - do: - do_.some.key_to_replace: - index: test - id: 1 - keyvalue : toreplace - do_.some.key_to_replace_in_two: - no_change_here: "because it's not in test 'two'" - warnings: - - "warning to remove" - - match: { _source.values: ["foo"] } - - match: { _type: "_foo" } - - match: { _source.blah: 1234 } - - match: { _source.junk: true } - - match: { match_.some.key_to_replace: true } - - is_true: "value_to_replace" - - is_false: "value_to_replace" - - is_true: "value_not_to_replace" - - is_false: "value_not_to_replace" - - length: { key.in_length_to_replace: 1 } - - length: { value_to_replace: 1 } - --- - "two": - - do: - get: - index: test - id: 1 - do_.some.key_to_replace_in_two: - changed_here: "because it is in test 'two'" - - match: { _source.values: ["foo"] } - - match: { _type: "_foo" } - - match: { _source.blah: 1234 } - - match: { 
_source.junk: true } - - is_true: "value_to_replace" - - is_false: "value_to_replace" - - is_true: "value_not_to_replace" - - is_false: "value_not_to_replace" - - length: { value_not_to_replace: 1 } - --- - "use cat with no header": - - do: - cat.indices: - {} - - match: {} - """.stripIndent() - when: - def result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest").build() - - then: - - result.task(transformTask).outcome == TaskOutcome.SUCCESS - - - file("/build/${testIntermediateDir}/transformed/rest-api-spec/test/test.yml" ).exists() - List actual = READER.readValues(file("/build/${testIntermediateDir}/transformed/rest-api-spec/test/test.yml")).readAll() - List expectedAll = READER.readValues( - """ - --- - setup: - - skip: - features: - - "headers" - - "warnings" - - "warnings_regex" - - "allowed_warnings" - - "allowed_warnings_regex" - --- - one: - - do: - do_.some.key_that_was_replaced: - index: "test" - id: 1 - keyvalue : replacedkeyvalue - do_.some.key_to_replace_in_two: - no_change_here: "because it's not in test 'two'" - warnings: - - "warning1" - - "warning2" - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - allowed_warnings: - - "added allowed warning" - allowed_warnings_regex: - - "added allowed warning regex .* [0-9]" - - match: - _source.values: - - "z" - - "x" - - "y" - - match: - _type: "_doc" - - match: {} - - match: - _source.junk: true - - match: - match_.some.key_that_was_replaced: true - - is_true: "replaced_value" - - is_false: "replaced_value" - - is_true: "value_not_to_replace" - - is_false: "value_not_to_replace" - - length: { key.in_length_that_was_replaced: 1 } - - length: { value_to_replace: 99 } - - match: - _source.added: - name: "jake" - likes: "cheese" - - --- - two: - - skip: - version: "all" - reason: "This is a test to skip test two" - - do: - get: - index: "test2" - id: 1 - do_.some.key_that_was_replaced_in_two: - changed_here: "because it is in test 'two'" - headers: - Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" - Accept: "application/vnd.elasticsearch+json;compatible-with=7" - warnings_regex: - - "regex warning here .* [a-z]" - allowed_warnings: - - "added allowed warning" - allowed_warnings_regex: - - "added allowed warning regex .* [0-9]" - - match: - _source.values: - - "foo" - - match: - _type: "_doc" - - match: {} - - match: {} - - is_true: "replaced_value" - - is_false: "replaced_value" - - is_true: "value_not_to_replace" - - is_false: "value_not_to_replace" - - length: { value_not_to_replace: 1 } - --- - "use cat with no header": - - do: - cat.indices: - {} - allowed_warnings: - - "added allowed warning" - allowed_warnings_regex: - - "added allowed warning regex .* [0-9]" - - match: {} - """.stripIndent()).readAll() - - expectedAll.eachWithIndex{ ObjectNode expected, int i -> - if(expected != actual.get(i)) { - println("\nTransformed Test:") - SequenceWriter sequenceWriter = WRITER.writeValues(System.out) - for (ObjectNode transformedTest : actual) { - sequenceWriter.write(transformedTest) - } - sequenceWriter.close() - } - assert expected == actual.get(i) - } - - when: - result = gradleRunner(transformTask).build() - - then: - result.task(transformTask).outcome == TaskOutcome.UP_TO_DATE - - when: - buildFile.write(buildFile.text.replace("blah", "baz")) - result = gradleRunner(transformTask).build() - - then: - result.task(transformTask).outcome == TaskOutcome.SUCCESS - } - -} diff --git 
a/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle b/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle index b80c450c5914..a5e74c372129 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.bwc-test.gradle @@ -9,6 +9,8 @@ import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.ElasticsearchTestBasePlugin import org.elasticsearch.gradle.internal.info.BuildParams +import org.elasticsearch.gradle.internal.test.rest.InternalJavaRestTestPlugin +import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask ext.bwcTaskName = { Version version -> return "v${version}#bwcTest" @@ -36,5 +38,17 @@ plugins.withType(ElasticsearchTestBasePlugin) { } } +plugins.withType(InternalJavaRestTestPlugin) { + tasks.named("javaRestTest") { + enabled = false + } + + tasks.withType(StandaloneRestIntegTestTask).configureEach { + testClassesDirs = sourceSets.javaRestTest.output.classesDirs + classpath = sourceSets.javaRestTest.runtimeClasspath + usesDefaultDistribution() + } +} + tasks.matching { it.name.equals("check") }.configureEach {dependsOn(bwcTestSnapshots) } tasks.matching { it.name.equals("test") }.configureEach {enabled = false} diff --git a/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle b/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle index 0fe3e8dd7846..08fbc5b67e97 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle @@ -9,6 +9,8 @@ import org.elasticsearch.gradle.internal.ExportElasticsearchBuildResourcesTask import org.elasticsearch.gradle.internal.info.BuildParams +import org.elasticsearch.gradle.internal.test.rest.RestTestBasePlugin +import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask import org.elasticsearch.gradle.testclusters.TestClustersAware import org.elasticsearch.gradle.testclusters.TestDistribution @@ -76,6 +78,14 @@ if (BuildParams.inFipsJvm) { keystorePassword 'keystore-password' } } + + plugins.withType(RestTestBasePlugin) { + tasks.withType(StandaloneRestIntegTestTask).configureEach { + inputs.files(extraFipsJarsConfiguration).withNormalizer(ClasspathNormalizer) + nonInputProperties.systemProperty "tests.cluster.fips.jars.path", "${-> extraFipsJarsConfiguration.asPath}" + } + } + project.tasks.withType(Test).configureEach { Test task -> dependsOn 'fipsResources' task.systemProperty('javax.net.ssl.trustStorePassword', 'password') diff --git a/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle b/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle index 1d1112706fd0..6a6dbe43875c 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.runtime-jdk-provision.gradle @@ -10,6 +10,8 @@ import org.elasticsearch.gradle.Architecture import org.elasticsearch.gradle.OS import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.internal.info.BuildParams +import org.elasticsearch.gradle.internal.test.rest.RestTestBasePlugin +import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask // gradle has an open issue of failing applying plugins in // precompiled script plugins (see https://github.com/gradle/gradle/issues/17004) @@ -42,4 +44,13 @@ configure(allprojects) { } } } + + project.plugins.withType(RestTestBasePlugin) { + 
tasks.withType(StandaloneRestIntegTestTask).configureEach { + if (BuildParams.getIsRuntimeJavaHomeSet() == false) { + dependsOn(project.jdks.provisioned_runtime) + nonInputProperties.systemProperty("tests.runtime.java", "${-> project.jdks.provisioned_runtime.javaHomePath}") + } + } + } } diff --git a/build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle b/build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle new file mode 100644 index 000000000000..c52bd9d1d52c --- /dev/null +++ b/build-tools-internal/src/main/groovy/elasticsearch.stable-api.gradle @@ -0,0 +1,50 @@ +import org.apache.tools.ant.taskdefs.condition.Os +import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.VersionProperties +import org.elasticsearch.gradle.internal.BwcVersions +import org.elasticsearch.gradle.internal.JarApiComparisonTask +import org.elasticsearch.gradle.internal.info.BuildParams + +import static org.elasticsearch.gradle.internal.InternalDistributionBwcSetupPlugin.buildBwcTaskName + +configurations { + newJar +} + +dependencies { + newJar project(":libs:${project.name}") +} + +BuildParams.bwcVersions.withIndexCompatible({ it.onOrAfter(Version.fromString(ext.stableApiSince)) + && it != VersionProperties.elasticsearchVersion +}) { bwcVersion, baseName -> + + BwcVersions.UnreleasedVersionInfo unreleasedVersion = BuildParams.bwcVersions.unreleasedInfo(bwcVersion) + + configurations { + "oldJar${baseName}" { + transitive = false + } + } + + dependencies { + if (unreleasedVersion) { + // For unreleased snapshot versions, build them from source + "oldJar${baseName}"(files(project(unreleasedVersion.gradleProjectPath).tasks.named(buildBwcTaskName(project.name)))) + } else { + // For released versions, download it + "oldJar${baseName}"("org.elasticsearch:${project.name}:${bwcVersion}") + } + } + + def jarApiComparisonTask = tasks.register(bwcTaskName(bwcVersion), JarApiComparisonTask) { + oldJar = configurations."oldJar${baseName}" + newJar = configurations.newJar + } + + jarApiComparisonTask.configure { + onlyIf { + !Os.isFamily(Os.FAMILY_WINDOWS) + } + } +} diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy index f56a9fefceac..874141f2135a 100644 --- a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy +++ b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy @@ -38,7 +38,7 @@ class DocsTestPlugin implements Plugin { @Override void apply(Project project) { - project.pluginManager.apply('elasticsearch.internal-yaml-rest-test') + project.pluginManager.apply('elasticsearch.legacy-yaml-rest-test') String distribution = System.getProperty('tests.distribution', 'default') // The distribution can be configured with -Dtests.distribution on the command line diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy index b9b77da16bd6..eda86355ee30 100644 --- a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy +++ b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy @@ -210,6 +210,12 @@ class RestTestsFromSnippetsTask extends SnippetsTask { return } if 
(snippet.testResponse || snippet.language == 'console-result') { + if (previousTest == null) { + throw new InvalidUserDataException("$snippet: No paired previous test") + } + if (previousTest.path != snippet.path) { + throw new InvalidUserDataException("$snippet: Result can't be first in file") + } response(snippet) return } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcSetupExtension.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcSetupExtension.java index 28b6c1e66992..ef7c9507d865 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcSetupExtension.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcSetupExtension.java @@ -12,6 +12,7 @@ import org.apache.tools.ant.taskdefs.condition.Os; import org.elasticsearch.gradle.LoggedExec; import org.elasticsearch.gradle.Version; +import org.elasticsearch.gradle.internal.info.BuildParams; import org.gradle.api.Action; import org.gradle.api.GradleException; import org.gradle.api.Project; @@ -37,30 +38,35 @@ public class BwcSetupExtension { private static final Version BUILD_TOOL_MINIMUM_VERSION = Version.fromString("7.14.0"); private final Project project; private final Provider unreleasedVersionInfo; - private final Provider bwcTaskThrottleProvider; private Provider checkoutDir; public BwcSetupExtension( Project project, Provider unreleasedVersionInfo, - Provider bwcTaskThrottleProvider, Provider checkoutDir ) { this.project = project; this.unreleasedVersionInfo = unreleasedVersionInfo; - this.bwcTaskThrottleProvider = bwcTaskThrottleProvider; this.checkoutDir = checkoutDir; } TaskProvider bwcTask(String name, Action configuration) { - return createRunBwcGradleTask(project, name, configuration); + return bwcTask(name, configuration, true); } - private TaskProvider createRunBwcGradleTask(Project project, String name, Action configAction) { + TaskProvider bwcTask(String name, Action configuration, boolean useUniqueUserHome) { + return createRunBwcGradleTask(project, name, configuration, useUniqueUserHome); + } + + private TaskProvider createRunBwcGradleTask( + Project project, + String name, + Action configAction, + boolean useUniqueUserHome + ) { return project.getTasks().register(name, LoggedExec.class, loggedExec -> { loggedExec.dependsOn("checkoutBwcBranch"); - loggedExec.usesService(bwcTaskThrottleProvider); loggedExec.getWorkingDir().set(checkoutDir.get()); loggedExec.getEnvironment().put("JAVA_HOME", unreleasedVersionInfo.zip(checkoutDir, (version, checkoutDir) -> { @@ -68,6 +74,11 @@ private TaskProvider createRunBwcGradleTask(Project project, String return getJavaHome(Integer.parseInt(minimumCompilerVersion)); })); + if (BuildParams.isCi() && Os.isFamily(Os.FAMILY_WINDOWS) == false) { + // TODO: Disabled for now until we can figure out why files are getting corrupted + // loggedExec.getEnvironment().put("GRADLE_RO_DEP_CACHE", System.getProperty("user.home") + "/gradle_ro_cache"); + } + if (Os.isFamily(Os.FAMILY_WINDOWS)) { loggedExec.getExecutable().set("cmd"); loggedExec.args("/C", "call", new File(checkoutDir.get(), "gradlew").toString()); @@ -75,7 +86,11 @@ private TaskProvider createRunBwcGradleTask(Project project, String loggedExec.getExecutable().set(new File(checkoutDir.get(), "gradlew").toString()); } - loggedExec.args("-g", project.getGradle().getGradleUserHomeDir().toString()); + if (useUniqueUserHome) { + loggedExec.dependsOn("setupGradleUserHome"); + loggedExec.args("-g", 
project.getGradle().getGradleUserHomeDir().getAbsolutePath() + "-" + project.getName()); + } + if (project.getGradle().getStartParameter().isOffline()) { loggedExec.args("--offline"); } @@ -101,6 +116,9 @@ private TaskProvider createRunBwcGradleTask(Project project, String if (project.getGradle().getStartParameter().isParallelProjectExecutionEnabled()) { loggedExec.args("--parallel"); } + for (File initScript : project.getGradle().getStartParameter().getInitScripts()) { + loggedExec.args("-I", initScript.getAbsolutePath()); + } loggedExec.getIndentingConsoleOutput().set(unreleasedVersionInfo.map(v -> v.version().toString())); configAction.execute(loggedExec); }); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java index fa578ffa7e20..7a5bead71fb0 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchJavaBasePlugin.java @@ -38,8 +38,8 @@ public class ElasticsearchJavaBasePlugin implements Plugin { public void apply(Project project) { // make sure the global build info plugin is applied to the root project project.getRootProject().getPluginManager().apply(GlobalBuildInfoPlugin.class); - // common repositories setup project.getPluginManager().apply(JavaBasePlugin.class); + // common repositories setup project.getPluginManager().apply(RepositoriesSetupPlugin.class); project.getPluginManager().apply(ElasticsearchTestBasePlugin.class); project.getPluginManager().apply(PrecommitTaskPlugin.class); @@ -146,6 +146,7 @@ public static void configureCompile(Project project) { */ public static void configureInputNormalization(Project project) { project.getNormalization().getRuntimeClasspath().ignore("META-INF/MANIFEST.MF"); + project.getNormalization().getRuntimeClasspath().ignore("IMPL-JARS/**/META-INF/MANIFEST.MF"); } private static Provider releaseVersionProviderFromCompileTask(Project project, AbstractCompile compileTask) { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java index c33545ada993..c6758092b17e 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java @@ -54,7 +54,8 @@ public void apply(Project project) { project.getTasks().withType(Test.class).configureEach(test -> { File testOutputDir = new File(test.getReports().getJunitXml().getOutputLocation().getAsFile().get(), "output"); - ErrorReportingTestListener listener = new ErrorReportingTestListener(test.getTestLogging(), test.getLogger(), testOutputDir); + ErrorReportingTestListener listener = new ErrorReportingTestListener(test, testOutputDir); + test.getExtensions().getExtraProperties().set("dumpOutputOnFailure", true); test.getExtensions().add("errorReportingTestListener", listener); test.addTestOutputListener(listener); test.addTestListener(listener); @@ -88,7 +89,7 @@ public void execute(Task t) { test.getJvmArgumentProviders().add(nonInputProperties); test.getExtensions().add("nonInputProperties", nonInputProperties); - test.setWorkingDir(project.file(project.getBuildDir() + "/testrun/" + 
test.getName())); + test.setWorkingDir(project.file(project.getBuildDir() + "/testrun/" + test.getName().replace("#", "_"))); test.setMaxParallelForks(Integer.parseInt(System.getProperty("tests.jvms", BuildParams.getDefaultParallel().toString()))); test.exclude("**/*$*.class"); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java index f974b02a1c5b..a32358c6db4f 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java @@ -18,8 +18,7 @@ import org.gradle.api.Task; import org.gradle.api.provider.Provider; import org.gradle.api.provider.ProviderFactory; -import org.gradle.api.services.BuildService; -import org.gradle.api.services.BuildServiceParameters; +import org.gradle.api.tasks.Copy; import org.gradle.api.tasks.TaskProvider; import org.gradle.language.base.plugins.LifecycleBasePlugin; @@ -28,6 +27,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Locale; +import java.util.Set; import java.util.stream.Collectors; import javax.inject.Inject; @@ -44,7 +44,6 @@ */ public class InternalDistributionBwcSetupPlugin implements Plugin { - private static final String BWC_TASK_THROTTLE_SERVICE = "bwcTaskThrottle"; private ProviderFactory providerFactory; @Inject @@ -55,26 +54,19 @@ public InternalDistributionBwcSetupPlugin(ProviderFactory providerFactory) { @Override public void apply(Project project) { project.getRootProject().getPluginManager().apply(GlobalBuildInfoPlugin.class); - Provider bwcTaskThrottleProvider = project.getGradle() - .getSharedServices() - .registerIfAbsent(BWC_TASK_THROTTLE_SERVICE, BwcTaskThrottle.class, spec -> spec.getMaxParallelUsages().set(1)); BuildParams.getBwcVersions() .forPreviousUnreleased( (BwcVersions.UnreleasedVersionInfo unreleasedVersion) -> { - configureBwcProject(project.project(unreleasedVersion.gradleProjectPath()), unreleasedVersion, bwcTaskThrottleProvider); + configureBwcProject(project.project(unreleasedVersion.gradleProjectPath()), unreleasedVersion); } ); } - private void configureBwcProject( - Project project, - BwcVersions.UnreleasedVersionInfo versionInfo, - Provider bwcTaskThrottleProvider - ) { + private void configureBwcProject(Project project, BwcVersions.UnreleasedVersionInfo versionInfo) { Provider versionInfoProvider = providerFactory.provider(() -> versionInfo); Provider checkoutDir = versionInfoProvider.map(info -> new File(project.getBuildDir(), "bwc/checkout-" + info.branch())); BwcSetupExtension bwcSetupExtension = project.getExtensions() - .create("bwcSetup", BwcSetupExtension.class, project, versionInfoProvider, bwcTaskThrottleProvider, checkoutDir); + .create("bwcSetup", BwcSetupExtension.class, project, versionInfoProvider, checkoutDir); BwcGitExtension gitExtension = project.getPlugins().apply(InternalBwcGitPlugin.class).getGitExtension(); Provider bwcVersion = versionInfoProvider.map(info -> info.version()); gitExtension.setBwcVersion(versionInfoProvider.map(info -> info.version())); @@ -87,6 +79,15 @@ private void configureBwcProject( TaskProvider buildBwcTaskProvider = project.getTasks().register("buildBwc"); List distributionProjects = resolveArchiveProjects(checkoutDir.get(), bwcVersion.get()); + // Setup gradle user home directory + 
project.getTasks().register("setupGradleUserHome", Copy.class, copy -> { + copy.into(project.getGradle().getGradleUserHomeDir().getAbsolutePath() + "-" + project.getName()); + copy.from(project.getGradle().getGradleUserHomeDir().getAbsolutePath(), copySpec -> { + copySpec.include("gradle.properties"); + copySpec.include("init.d/*"); + }); + }); + for (DistributionProject distributionProject : distributionProjects) { createBuildBwcTask( bwcSetupExtension, @@ -120,6 +121,35 @@ private void configureBwcProject( buildBwcTaskProvider, "assemble" ); + + // for versions before 8.7.0, we do not need to set up stable API bwc + if (bwcVersion.get().before(Version.fromString("8.7.0"))) { + return; + } + + for (Project stableApiProject : resolveStableProjects(project)) { + + String relativeDir = project.getRootProject().relativePath(stableApiProject.getProjectDir()); + + DistributionProjectArtifact stableAnalysisPluginProjectArtifact = new DistributionProjectArtifact( + new File( + checkoutDir.get(), + relativeDir + "/build/distributions/" + stableApiProject.getName() + "-" + bwcVersion.get() + "-SNAPSHOT.jar" + ), + null + ); + + createBuildBwcTask( + bwcSetupExtension, + project, + bwcVersion, + stableApiProject.getName(), + "libs/" + stableApiProject.getName(), + stableAnalysisPluginProjectArtifact, + buildBwcTaskProvider, + "assemble" + ); + } } private void registerBwcDistributionArtifacts(Project bwcProject, DistributionProject distributionProject) { @@ -209,7 +239,16 @@ private static List resolveArchiveProjects(File checkoutDir }).collect(Collectors.toList()); } - private static String buildBwcTaskName(String projectName) { + private static List resolveStableProjects(Project project) { + Set stableProjectNames = Set.of("elasticsearch-logging", "elasticsearch-plugin-api", "elasticsearch-plugin-analysis-api"); + return project.findProject(":libs") + .getSubprojects() + .stream() + .filter(subproject -> stableProjectNames.contains(subproject.getName())) + .toList(); + } + + public static String buildBwcTaskName(String projectName) { return "buildBwc" + stream(projectName.split("-")).map(i -> i.substring(0, 1).toUpperCase(Locale.ROOT) + i.substring(1)) .collect(Collectors.joining()); @@ -237,11 +276,7 @@ static void createBuildBwcTask( } else { c.getOutputs().files(expectedOutputFile); } - c.getOutputs().cacheIf("BWC distribution caching is disabled on 'main' branch", task -> { - String gitBranch = System.getenv("GIT_BRANCH"); - return BuildParams.isCi() - && (gitBranch == null || gitBranch.endsWith("master") == false || gitBranch.endsWith("main") == false); - }); + c.getOutputs().doNotCacheIf("BWC distribution caching is disabled for local builds", task -> BuildParams.isCi() == false); c.getArgs().add(projectPath.replace('/', ':') + ":" + assembleTaskName); if (project.getGradle().getStartParameter().isBuildCacheEnabled()) { c.getArgs().add("--build-cache"); @@ -323,5 +358,4 @@ private static class DistributionProjectArtifact { } } - public abstract class BwcTaskThrottle implements BuildService {} } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionModuleCheckTaskProvider.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionModuleCheckTaskProvider.java index 0a29741be893..51239853aa12 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionModuleCheckTaskProvider.java +++ 
b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionModuleCheckTaskProvider.java @@ -54,8 +54,8 @@ public class InternalDistributionModuleCheckTaskProvider { "org.elasticsearch.geo", "org.elasticsearch.logging", "org.elasticsearch.lz4", - "org.elasticsearch.plugin.analysis.api", - "org.elasticsearch.plugin.api", + "org.elasticsearch.plugin", + "org.elasticsearch.plugin.analysis", "org.elasticsearch.pluginclassloader", "org.elasticsearch.securesm", "org.elasticsearch.server", diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestArtifactExtension.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestArtifactExtension.java index fae845b22965..4952085f466b 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestArtifactExtension.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalTestArtifactExtension.java @@ -32,7 +32,7 @@ public void registerTestArtifactFromSourceSet(SourceSet sourceSet) { JavaPluginExtension javaPluginExtension = project.getExtensions().getByType(JavaPluginExtension.class); javaPluginExtension.registerFeature(name + "Artifacts", featureSpec -> { featureSpec.usingSourceSet(sourceSet); - featureSpec.capability("org.elasticsearch.gradle", project.getName() + "-" + name + "-artifacts", "1.0"); + featureSpec.capability("org.elasticsearch.gradle", project.getName() + "-test-artifacts", "1.0"); // This feature is only used internally in the // elasticsearch build so we do not need any publication. featureSpec.disablePublication(); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/JarApiComparisonTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/JarApiComparisonTask.java new file mode 100644 index 000000000000..b1da632d84cd --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/JarApiComparisonTask.java @@ -0,0 +1,224 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal; + +import org.elasticsearch.gradle.internal.conventions.precommit.PrecommitTask; +import org.gradle.api.file.FileCollection; +import org.gradle.api.provider.Property; +import org.gradle.api.tasks.CacheableTask; +import org.gradle.api.tasks.CompileClasspath; +import org.gradle.api.tasks.TaskAction; + +import java.io.BufferedReader; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.jar.JarFile; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.zip.ZipEntry; + +/** + * This implementation of a jar API comparison uses the "javap" tool to compare + * the "signatures" of two different jars. We assume that calling out to javap + * is not too expensive at this stage of the stable API project. We also assume + * that for every public class, method, and field, javap will print a consistent + * single line. 
This should let us make string comparisons, rather than having
+ * to parse the output of javap.
+ * <p>
+ * While the above assumptions appear to hold, they are not guaranteed, and hence
+ * brittle. We could overcome these problems with an ASM implementation of the
+ * Jar Scanner.
+ * <p>
+ * We also assume that we will not be comparing multi-version JARs.
+ * <p>
+ * This "javap" approach has a few further drawbacks:
+ * <ol>
+ *     <li>We don't account for class visibility when examining fields and methods.</li>
+ *     <li>We don't consider what is exported from the module. Is a public method from
+ *     a non-exported package considered part of the stable api?</li>
+ *     <li>Changing method types to their superclass or return types to an implementation
+ *     class will be considered a change by this approach, even though that doesn't break
+ *     an API.</li>
+ *     <li>Finally, moving a method up the class hierarchy is not really a breaking change,
+ *     but it will trip this test.</li>
+ * </ol>
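+ * <p>
+ * For example, with hypothetical class and member names, the javap output this comparison
+ * relies on looks roughly like:
+ * <pre>{@code
+ * public class org.example.Foo {
+ *   public org.example.Foo();
+ *   public java.lang.String bar(int);
+ * }
+ * }</pre>
+ * Each such {@code public ...} line becomes one entry in the per-class signature set.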
+ */ +@CacheableTask +public abstract class JarApiComparisonTask extends PrecommitTask { + + @TaskAction + public void compare() { + FileCollection fileCollection = getOldJar().get(); + File newJarFile = getNewJar().get().getSingleFile(); + + Set oldJarNames = fileCollection.getFiles().stream().map(File::getName).collect(Collectors.toSet()); + if (oldJarNames.size() > 1) { + throw new IllegalStateException("Expected a single original jar, but found: " + oldJarNames); + } + if (oldJarNames.contains(newJarFile.getName())) { + throw new IllegalStateException( + "We should be comparing different jars, but original and new jars were both: " + newJarFile.getAbsolutePath() + ); + } + + JarScanner oldJS = new JarScanner(getOldJar().get().getSingleFile().getPath()); + JarScanner newJS = new JarScanner(newJarFile.getPath()); + try { + JarScanner.compareSignatures(oldJS.jarSignature(), newJS.jarSignature()); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @CompileClasspath + public abstract Property getOldJar(); + + @CompileClasspath + public abstract Property getNewJar(); + + public static class JarScanner { + + private final String path; + + public JarScanner(String path) { + this.path = path; + } + + private String getPath() { + return path; + } + + /** + * Get a list of class names contained in this jar by looking for file names + * that end in ".class" + */ + List classNames() throws IOException { + Pattern classEnding = Pattern.compile(".*\\.class$"); + try (JarFile jf = new JarFile(this.path)) { + return jf.stream().map(ZipEntry::getName).filter(classEnding.asMatchPredicate()).collect(Collectors.toList()); + } + } + + /** + * Given a path to a file in the jar, get the output of javap as a list of strings. + */ + public List disassembleFromJar(String fileInJarPath, String classpath) { + String location = "jar:file://" + getPath() + "!/" + fileInJarPath; + return disassemble(location, getPath(), classpath); + } + + /** + * Invoke javap on a class file, optionally providing a module path or class path + */ + static List disassemble(String location, String modulePath, String classpath) { + ProcessBuilder pb = new ProcessBuilder(); + List command = new ArrayList<>(); + command.add("javap"); + if (modulePath != null) { + command.add("--module-path"); + command.add(modulePath); + } + if (classpath != null) { + command.add("--class-path"); + command.add(classpath); + } + command.add(location); + pb.command(command.toArray(new String[] {})); + Process p; + try { + p = pb.start(); + p.onExit().get(); + } catch (Exception e) { + throw new RuntimeException(e); + } + + InputStream streamToRead = p.exitValue() == 0 ? p.getInputStream() : p.getErrorStream(); + + try (BufferedReader br = new BufferedReader(new InputStreamReader(streamToRead))) { + return br.lines().toList(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * Given the output of the javap command, that is, the disassembled class file, + * return a set of signatures for all public classes, methods, and fields. + */ + public static Set signaturesSet(List javapOutput) { + return javapOutput.stream().filter(s -> s.matches("^\\s*public.*")).collect(Collectors.toSet()); + } + + /** + * Given a disassembled module-info.class, return all unqualified exports. 
+ */ + public static Set moduleInfoSignaturesSet(List javapOutput) { + return javapOutput.stream() + .filter(s -> s.matches("^\\s*exports.*")) + .filter(s -> s.matches(".* to$") == false) + .collect(Collectors.toSet()); + } + + /** + * Iterate over classes and gather signatures. + */ + public Map> jarSignature() throws IOException { + return this.classNames().stream().collect(Collectors.toMap(s -> s, s -> { + List disassembled = disassembleFromJar(s, null); + if ("module-info.class".equals(s)) { + return moduleInfoSignaturesSet(disassembled); + } + return signaturesSet(disassembled); + })); + } + + /** + * Comparison: The signatures are maps of class names to public class, field, or method + * declarations. + *
<p>
+ * First, we check that the new jar signature contains all the same classes
+ * as the old jar signature. If not, we return an error.
+ * <p>
+ * Second, we iterate over the signature for each class. If a signature from the old
+ * jar is absent in the new jar, we add it to our list of errors.
+ * <p>
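+ * For illustration, with hypothetical entries: if the old signature maps {@code Foo.class} to
+ * {@code ["public void a();", "public void b();"]} and the new one maps it to {@code ["public void a();"]},
+ * then {@code public void b();} is reported as a deleted member and the check fails.
+ * <p>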
+ * Note that it is fine for the new jar to have additional elements, as this + * is backwards compatible. + */ + public static void compareSignatures(Map> oldSignature, Map> newSignature) { + Set deletedClasses = new HashSet<>(oldSignature.keySet()); + deletedClasses.removeAll(newSignature.keySet()); + if (deletedClasses.size() > 0) { + throw new IllegalStateException("Classes from a previous version not found: " + deletedClasses); + } + + Map> deletedMembersMap = new HashMap<>(); + for (Map.Entry> entry : oldSignature.entrySet()) { + Set deletedMembers = new HashSet<>(entry.getValue()); + deletedMembers.removeAll(newSignature.get(entry.getKey())); + if (deletedMembers.size() > 0) { + deletedMembersMap.put(entry.getKey(), Set.copyOf(deletedMembers)); + } + } + if (deletedMembersMap.size() > 0) { + throw new IllegalStateException( + "Classes from a previous version have been modified, violating backwards compatibility: " + deletedMembersMap + ); + } + } + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ResolveAllDependencies.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ResolveAllDependencies.java index d86ec9001d41..0afa675c9dfc 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ResolveAllDependencies.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ResolveAllDependencies.java @@ -21,7 +21,7 @@ import javax.inject.Inject; import static org.elasticsearch.gradle.DistributionDownloadPlugin.DISTRO_EXTRACTED_CONFIG_PREFIX; -import static org.elasticsearch.gradle.internal.rest.compat.YamlRestCompatTestPlugin.BWC_MINOR_CONFIG_NAME; +import static org.elasticsearch.gradle.internal.test.rest.compat.compat.LegacyYamlRestCompatTestPlugin.BWC_MINOR_CONFIG_NAME; public class ResolveAllDependencies extends DefaultTask { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java index 33a859747681..c3feb367c76e 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/BuildParams.java @@ -18,6 +18,7 @@ import java.time.ZonedDateTime; import java.util.Arrays; import java.util.List; +import java.util.Random; import java.util.function.Consumer; import static java.util.Objects.requireNonNull; @@ -110,10 +111,18 @@ public static String getTestSeed() { return value(testSeed); } + public static Random getRandom() { + return new Random(Long.parseUnsignedLong(testSeed.split(":")[0], 16)); + } + public static Boolean isCi() { return value(isCi); } + public static Boolean isGraalVmRuntime() { + return value(runtimeJavaDetails.toLowerCase().contains("graalvm")); + } + public static Integer getDefaultParallel() { return value(defaultParallel); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/TestingConventionsPrecommitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/TestingConventionsPrecommitPlugin.java index 95d0ad2be4cc..6adf422133db 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/TestingConventionsPrecommitPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/TestingConventionsPrecommitPlugin.java @@ -11,7 +11,8 @@ import 
org.elasticsearch.gradle.internal.conventions.precommit.PrecommitPlugin; import org.elasticsearch.gradle.internal.test.InternalClusterTestPlugin; import org.elasticsearch.gradle.internal.test.rest.InternalJavaRestTestPlugin; -import org.elasticsearch.gradle.internal.test.rest.InternalYamlRestTestPlugin; +import org.elasticsearch.gradle.internal.test.rest.LegacyJavaRestTestPlugin; +import org.elasticsearch.gradle.internal.test.rest.LegacyYamlRestTestPlugin; import org.gradle.api.Action; import org.gradle.api.NamedDomainObjectProvider; import org.gradle.api.Project; @@ -43,8 +44,8 @@ public TaskProvider createTask(Project project) { }); }); - project.getPlugins().withType(InternalYamlRestTestPlugin.class, yamlRestTestPlugin -> { - NamedDomainObjectProvider sourceSet = sourceSets.named(InternalYamlRestTestPlugin.SOURCE_SET_NAME); + project.getPlugins().withType(LegacyYamlRestTestPlugin.class, yamlRestTestPlugin -> { + NamedDomainObjectProvider sourceSet = sourceSets.named(LegacyYamlRestTestPlugin.SOURCE_SET_NAME); setupTaskForSourceSet(project, sourceSet, t -> { t.getSuffixes().convention(List.of("IT")); t.getBaseClasses().convention(List.of("org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase")); @@ -68,8 +69,17 @@ public TaskProvider createTask(Project project) { }); }); + project.getPlugins().withType(LegacyJavaRestTestPlugin.class, javaRestTestPlugin -> { + NamedDomainObjectProvider sourceSet = sourceSets.named(LegacyJavaRestTestPlugin.SOURCE_SET_NAME); + setupTaskForSourceSet(project, sourceSet, t -> { + t.getSuffixes().convention(List.of("IT")); + t.getBaseClasses() + .convention(List.of("org.elasticsearch.test.ESIntegTestCase", "org.elasticsearch.test.rest.ESRestTestCase")); + }); + }); + project.getPlugins().withType(InternalJavaRestTestPlugin.class, javaRestTestPlugin -> { - NamedDomainObjectProvider sourceSet = sourceSets.named(InternalJavaRestTestPlugin.SOURCE_SET_NAME); + NamedDomainObjectProvider sourceSet = sourceSets.named(LegacyJavaRestTestPlugin.SOURCE_SET_NAME); setupTaskForSourceSet(project, sourceSet, t -> { t.getSuffixes().convention(List.of("IT")); t.getBaseClasses() diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/YamlRestCompatTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/YamlRestCompatTestPlugin.java deleted file mode 100644 index bb245bec61b9..000000000000 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/YamlRestCompatTestPlugin.java +++ /dev/null @@ -1,271 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.gradle.internal.rest.compat; - -import org.elasticsearch.gradle.Version; -import org.elasticsearch.gradle.VersionProperties; -import org.elasticsearch.gradle.internal.ElasticsearchJavaBasePlugin; -import org.elasticsearch.gradle.internal.test.RestIntegTestTask; -import org.elasticsearch.gradle.internal.test.RestTestBasePlugin; -import org.elasticsearch.gradle.internal.test.rest.CopyRestApiTask; -import org.elasticsearch.gradle.internal.test.rest.CopyRestTestsTask; -import org.elasticsearch.gradle.internal.test.rest.InternalYamlRestTestPlugin; -import org.elasticsearch.gradle.internal.test.rest.RestResourcesExtension; -import org.elasticsearch.gradle.internal.test.rest.RestResourcesPlugin; -import org.elasticsearch.gradle.internal.test.rest.RestTestUtil; -import org.elasticsearch.gradle.testclusters.TestClustersPlugin; -import org.elasticsearch.gradle.util.GradleUtils; -import org.gradle.api.Plugin; -import org.gradle.api.Project; -import org.gradle.api.Task; -import org.gradle.api.artifacts.Configuration; -import org.gradle.api.artifacts.Dependency; -import org.gradle.api.file.Directory; -import org.gradle.api.file.ProjectLayout; -import org.gradle.api.file.RelativePath; -import org.gradle.api.internal.file.FileOperations; -import org.gradle.api.plugins.ExtraPropertiesExtension; -import org.gradle.api.plugins.JavaBasePlugin; -import org.gradle.api.provider.Provider; -import org.gradle.api.tasks.SourceSet; -import org.gradle.api.tasks.SourceSetContainer; -import org.gradle.api.tasks.Sync; -import org.gradle.api.tasks.TaskProvider; - -import java.io.File; -import java.nio.file.Path; -import java.util.Arrays; -import java.util.Map; - -import javax.inject.Inject; - -import static org.elasticsearch.gradle.internal.test.rest.RestTestUtil.setupYamlRestTestDependenciesDefaults; - -/** - * Apply this plugin to run the YAML based REST tests from a prior major version against this version's cluster. 
- */ -public class YamlRestCompatTestPlugin implements Plugin { - public static final String BWC_MINOR_CONFIG_NAME = "bwcMinor"; - private static final String REST_COMPAT_CHECK_TASK_NAME = "checkRestCompat"; - private static final String COMPATIBILITY_APIS_CONFIGURATION = "restCompatSpecs"; - private static final String COMPATIBILITY_TESTS_CONFIGURATION = "restCompatTests"; - private static final Path RELATIVE_API_PATH = Path.of("rest-api-spec/api"); - private static final Path RELATIVE_TEST_PATH = Path.of("rest-api-spec/test"); - private static final Path RELATIVE_REST_API_RESOURCES = Path.of("rest-api-spec/src/main/resources"); - private static final Path RELATIVE_REST_CORE = Path.of("rest-api-spec"); - private static final Path RELATIVE_REST_XPACK = Path.of("x-pack/plugin"); - private static final Path RELATIVE_REST_PROJECT_RESOURCES = Path.of("src/yamlRestTest/resources"); - private static final int COMPATIBLE_VERSION = Version.fromString(VersionProperties.getVersions().get("elasticsearch")).getMajor() - 1; - private static final String SOURCE_SET_NAME = "yamlRestTestV" + COMPATIBLE_VERSION + "Compat"; - private ProjectLayout projectLayout; - private FileOperations fileOperations; - - @Inject - public YamlRestCompatTestPlugin(ProjectLayout projectLayout, FileOperations fileOperations) { - this.projectLayout = projectLayout; - this.fileOperations = fileOperations; - } - - @Override - public void apply(Project project) { - - final Path compatRestResourcesDir = Path.of("restResources").resolve("v" + COMPATIBLE_VERSION); - final Path compatSpecsDir = compatRestResourcesDir.resolve("yamlSpecs"); - final Path compatTestsDir = compatRestResourcesDir.resolve("yamlTests"); - - project.getPluginManager().apply(ElasticsearchJavaBasePlugin.class); - project.getPluginManager().apply(TestClustersPlugin.class); - project.getPluginManager().apply(RestTestBasePlugin.class); - project.getPluginManager().apply(RestResourcesPlugin.class); - project.getPluginManager().apply(InternalYamlRestTestPlugin.class); - - RestResourcesExtension extension = project.getExtensions().getByType(RestResourcesExtension.class); - - // create source set - SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); - SourceSet yamlCompatTestSourceSet = sourceSets.create(SOURCE_SET_NAME); - SourceSet yamlTestSourceSet = sourceSets.getByName(InternalYamlRestTestPlugin.SOURCE_SET_NAME); - GradleUtils.extendSourceSet(project, InternalYamlRestTestPlugin.SOURCE_SET_NAME, SOURCE_SET_NAME); - - // copy compatible rest specs - Configuration bwcMinorConfig = project.getConfigurations().create(BWC_MINOR_CONFIG_NAME); - Dependency bwcMinor = project.getDependencies() - .project(Map.of("path", ":distribution:bwc:maintenance", "configuration", "checkout")); - project.getDependencies().add(bwcMinorConfig.getName(), bwcMinor); - - String projectPath = project.getPath(); - ExtraPropertiesExtension extraProperties = project.getExtensions().getExtraProperties(); - Provider copyCompatYamlSpecTask = project.getTasks() - .register("copyRestCompatApiTask", CopyRestApiTask.class, task -> { - task.dependsOn(bwcMinorConfig); - task.setConfig(bwcMinorConfig); - task.setAdditionalConfig(bwcMinorConfig); - task.getInclude().set(extension.getRestApi().getInclude()); - task.getOutputResourceDir().set(projectLayout.getBuildDirectory().dir(compatSpecsDir.toString())); - task.setSourceResourceDir( - yamlCompatTestSourceSet.getResources() - .getSrcDirs() - .stream() - .filter(f -> f.isDirectory() && 
f.getName().equals("resources")) - .findFirst() - .orElse(null) - ); - task.setSkipHasRestTestCheck(true); - task.setConfigToFileTree( - config -> fileOperations.fileTree( - config.getSingleFile().toPath().resolve(RELATIVE_REST_API_RESOURCES).resolve(RELATIVE_API_PATH) - ) - ); - task.setAdditionalConfigToFileTree( - config -> fileOperations.fileTree( - getCompatProjectPath(projectPath, config.getSingleFile().toPath()).resolve(RELATIVE_REST_PROJECT_RESOURCES) - .resolve(RELATIVE_API_PATH) - ) - ); - task.onlyIf(t -> isEnabled(extraProperties)); - }); - - // copy compatible rest tests - Provider copyCompatYamlTestTask = project.getTasks() - .register("copyRestCompatTestTask", CopyRestTestsTask.class, task -> { - task.dependsOn(bwcMinorConfig); - task.setCoreConfig(bwcMinorConfig); - task.setXpackConfig(bwcMinorConfig); - task.setAdditionalConfig(bwcMinorConfig); - task.getIncludeCore().set(extension.getRestTests().getIncludeCore()); - task.getIncludeXpack().set(extension.getRestTests().getIncludeXpack()); - task.getOutputResourceDir().set(projectLayout.getBuildDirectory().dir(compatTestsDir.resolve("original").toString())); - task.setCoreConfigToFileTree( - config -> fileOperations.fileTree( - config.getSingleFile() - .toPath() - .resolve(RELATIVE_REST_CORE) - .resolve(RELATIVE_REST_PROJECT_RESOURCES) - .resolve(RELATIVE_TEST_PATH) - ) - ); - task.setXpackConfigToFileTree( - config -> fileOperations.fileTree( - config.getSingleFile() - .toPath() - .resolve(RELATIVE_REST_XPACK) - .resolve(RELATIVE_REST_PROJECT_RESOURCES) - .resolve(RELATIVE_TEST_PATH) - ) - ); - task.setAdditionalConfigToFileTree( - config -> fileOperations.fileTree( - getCompatProjectPath(projectPath, config.getSingleFile().toPath()).resolve(RELATIVE_REST_PROJECT_RESOURCES) - .resolve(RELATIVE_TEST_PATH) - ) - ); - task.dependsOn(copyCompatYamlSpecTask); - task.onlyIf(t -> isEnabled(extraProperties)); - }); - - // copy both local source set apis and compat apis to a single location to be exported as an artifact - TaskProvider bundleRestCompatApis = project.getTasks().register("bundleRestCompatApis", Sync.class, task -> { - task.setDestinationDir(projectLayout.getBuildDirectory().dir("bundledCompatApis").get().getAsFile()); - task.setIncludeEmptyDirs(false); - task.from(copyCompatYamlSpecTask.flatMap(t -> t.getOutputResourceDir().map(d -> d.dir(RELATIVE_API_PATH.toString())))); - task.from(yamlCompatTestSourceSet.getProcessResourcesTaskName(), s -> { - s.include(RELATIVE_API_PATH + "/*"); - s.eachFile( - details -> details.setRelativePath( - new RelativePath(true, Arrays.stream(details.getRelativePath().getSegments()).skip(2).toArray(String[]::new)) - ) - ); - }); - }); - - // transform the copied tests task - TaskProvider transformCompatTestTask = project.getTasks() - .register("yamlRestTestV" + COMPATIBLE_VERSION + "CompatTransform", RestCompatTestTransformTask.class, task -> { - task.getSourceDirectory().set(copyCompatYamlTestTask.flatMap(CopyRestTestsTask::getOutputResourceDir)); - task.getOutputDirectory() - .set(project.getLayout().getBuildDirectory().dir(compatTestsDir.resolve("transformed").toString())); - task.onlyIf(t -> isEnabled(extraProperties)); - }); - - // Register compat rest resources with source set - yamlCompatTestSourceSet.getOutput().dir(copyCompatYamlSpecTask.map(CopyRestApiTask::getOutputResourceDir)); - yamlCompatTestSourceSet.getOutput().dir(transformCompatTestTask.map(RestCompatTestTransformTask::getOutputDirectory)); - - // Register artifact for transformed compatibility apis and tests - 
Configuration compatRestSpecs = project.getConfigurations().create(COMPATIBILITY_APIS_CONFIGURATION); - Configuration compatRestTests = project.getConfigurations().create(COMPATIBILITY_TESTS_CONFIGURATION); - project.getArtifacts().add(compatRestSpecs.getName(), bundleRestCompatApis.map(Sync::getDestinationDir)); - project.getArtifacts() - .add( - compatRestTests.getName(), - transformCompatTestTask.flatMap(t -> t.getOutputDirectory().dir(RELATIVE_TEST_PATH.toString())) - ); - - // Grab the original rest resources locations so we can omit them from the compatibility testing classpath down below - Provider originalYamlSpecsDir = project.getTasks() - .withType(CopyRestApiTask.class) - .named(RestResourcesPlugin.COPY_REST_API_SPECS_TASK) - .flatMap(CopyRestApiTask::getOutputResourceDir); - Provider originalYamlTestsDir = project.getTasks() - .withType(CopyRestTestsTask.class) - .named(RestResourcesPlugin.COPY_YAML_TESTS_TASK) - .flatMap(CopyRestTestsTask::getOutputResourceDir); - - String testTaskName = "yamlRestTestV" + COMPATIBLE_VERSION + "CompatTest"; - - // setup the test task - Provider yamlRestCompatTestTask = RestTestUtil.registerTestTask(project, yamlCompatTestSourceSet, testTaskName); - project.getTasks().withType(RestIntegTestTask.class).named(testTaskName).configure(testTask -> { - testTask.systemProperty("tests.restCompat", true); - // Use test runner and classpath from "normal" yaml source set - testTask.setTestClassesDirs( - yamlTestSourceSet.getOutput().getClassesDirs().plus(yamlCompatTestSourceSet.getOutput().getClassesDirs()) - ); - testTask.setClasspath( - yamlCompatTestSourceSet.getRuntimeClasspath() - // remove the "normal" api and tests - .minus(project.files(yamlTestSourceSet.getOutput().getResourcesDir())) - .minus(project.files(originalYamlSpecsDir)) - .minus(project.files(originalYamlTestsDir)) - ); - - // run compatibility tests after "normal" tests - testTask.mustRunAfter(project.getTasks().named(InternalYamlRestTestPlugin.SOURCE_SET_NAME)); - testTask.onlyIf(t -> isEnabled(extraProperties)); - }); - - setupYamlRestTestDependenciesDefaults(project, yamlCompatTestSourceSet); - - // setup IDE - GradleUtils.setupIdeForTestSourceSet(project, yamlCompatTestSourceSet); - - // add a lifecycle task to allow for a possible future additional rest compatibility without needing to change task names - TaskProvider checkRestCompatTask = project.getTasks().register(REST_COMPAT_CHECK_TASK_NAME, (thisCheckTask) -> { - thisCheckTask.setDescription("Runs all REST compatibility checks."); - thisCheckTask.setGroup("verification"); - }); - - // wire the lifecycle task into the main check task - project.getTasks().named(JavaBasePlugin.CHECK_TASK_NAME).configure(check -> check.dependsOn(checkRestCompatTask)); - - // wire the yamlRestCompatTest into the custom lifecycle task - project.getTasks().named(REST_COMPAT_CHECK_TASK_NAME).configure(check -> check.dependsOn(yamlRestCompatTestTask)); - - } - - private boolean isEnabled(ExtraPropertiesExtension extraProperties) { - Object bwcEnabled = extraProperties.getProperties().get("bwc_tests_enabled"); - return bwcEnabled == null || (Boolean) bwcEnabled; - } - - // TODO: implement custom extension that allows us move around of the projects between major versions and still find them - private Path getCompatProjectPath(String projectPath, Path checkoutDir) { - return checkoutDir.resolve(projectPath.replaceFirst(":", "").replace(":", File.separator)); - } -} diff --git 
a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/ErrorReportingTestListener.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/ErrorReportingTestListener.java index d00301e96fab..6659218593f9 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/ErrorReportingTestListener.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/ErrorReportingTestListener.java @@ -10,12 +10,12 @@ import org.gradle.api.internal.tasks.testing.logging.FullExceptionFormatter; import org.gradle.api.internal.tasks.testing.logging.TestExceptionFormatter; import org.gradle.api.logging.Logger; +import org.gradle.api.tasks.testing.Test; import org.gradle.api.tasks.testing.TestDescriptor; import org.gradle.api.tasks.testing.TestListener; import org.gradle.api.tasks.testing.TestOutputEvent; import org.gradle.api.tasks.testing.TestOutputListener; import org.gradle.api.tasks.testing.TestResult; -import org.gradle.api.tasks.testing.logging.TestLogging; import java.io.BufferedOutputStream; import java.io.BufferedReader; @@ -38,6 +38,7 @@ public class ErrorReportingTestListener implements TestOutputListener, TestListener { private static final String REPRODUCE_WITH_PREFIX = "REPRODUCE WITH"; + private final Test testTask; private final TestExceptionFormatter formatter; private final File outputDirectory; private final Logger taskLogger; @@ -45,9 +46,10 @@ public class ErrorReportingTestListener implements TestOutputListener, TestListe private Map> reproductionLines = new ConcurrentHashMap<>(); private Set failedTests = new LinkedHashSet<>(); - public ErrorReportingTestListener(TestLogging testLogging, Logger taskLogger, File outputDirectory) { - this.formatter = new FullExceptionFormatter(testLogging); - this.taskLogger = taskLogger; + public ErrorReportingTestListener(Test testTask, File outputDirectory) { + this.testTask = testTask; + this.formatter = new FullExceptionFormatter(testTask.getTestLogging()); + this.taskLogger = testTask.getLogger(); this.outputDirectory = outputDirectory; } @@ -80,34 +82,37 @@ public void afterSuite(final TestDescriptor suite, TestResult result) { Descriptor descriptor = Descriptor.of(suite); try { - // if the test suite failed, report all captured output - if (result.getResultType().equals(TestResult.ResultType.FAILURE)) { - EventWriter eventWriter = eventWriters.get(descriptor); - - if (eventWriter != null) { - // It's not explicit what the threading guarantees are for TestListener method execution so we'll - // be explicitly safe here to avoid interleaving output from multiple test suites - synchronized (this) { - // make sure we've flushed everything to disk before reading - eventWriter.flush(); - - System.err.println("\n\nSuite: " + suite); - - try (BufferedReader reader = eventWriter.reader()) { - PrintStream out = System.out; - for (String message = reader.readLine(); message != null; message = reader.readLine()) { - if (message.startsWith(" 1> ")) { - out = System.out; - } else if (message.startsWith(" 2> ")) { - out = System.err; + if (isDumpOutputEnabled()) { + // if the test suite failed, report all captured output + if (result.getResultType().equals(TestResult.ResultType.FAILURE)) { + EventWriter eventWriter = eventWriters.get(descriptor); + + if (eventWriter != null) { + // It's not explicit what the threading guarantees are for TestListener method execution so we'll + // be explicitly safe here to avoid interleaving output from multiple test suites + 
synchronized (this) { + // make sure we've flushed everything to disk before reading + eventWriter.flush(); + + System.err.println("\n\nSuite: " + suite); + + try (BufferedReader reader = eventWriter.reader()) { + PrintStream out = System.out; + for (String message = reader.readLine(); message != null; message = reader.readLine()) { + if (message.startsWith(" 1> ")) { + out = System.out; + } else if (message.startsWith(" 2> ")) { + out = System.err; + } + + out.println(message); } - - out.println(message); } } } } } + if (suite.getParent() == null) { // per test task top level gradle test run suite finished if (getFailedTests().size() > 0) { @@ -250,4 +255,9 @@ public void close() throws IOException { outputFile.delete(); } } + + private boolean isDumpOutputEnabled() { + Object errorReportingEnabled = testTask.getExtensions().getExtraProperties().get("dumpOutputOnFailure"); + return errorReportingEnabled == null || (boolean) errorReportingEnabled; + } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java new file mode 100644 index 000000000000..f27020511145 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java @@ -0,0 +1,118 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.test; + +import org.elasticsearch.gradle.internal.ElasticsearchJavaBasePlugin; +import org.elasticsearch.gradle.internal.ElasticsearchTestBasePlugin; +import org.elasticsearch.gradle.internal.FixtureStop; +import org.elasticsearch.gradle.internal.InternalTestClustersPlugin; +import org.elasticsearch.gradle.internal.precommit.InternalPrecommitTasks; +import org.elasticsearch.gradle.test.SystemPropertyCommandLineArgumentProvider; +import org.elasticsearch.gradle.testclusters.ElasticsearchCluster; +import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask; +import org.elasticsearch.gradle.testclusters.TestClustersPlugin; +import org.elasticsearch.gradle.util.GradleUtils; +import org.gradle.api.NamedDomainObjectContainer; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.plugins.JavaBasePlugin; +import org.gradle.api.provider.ProviderFactory; +import org.gradle.api.tasks.Sync; +import org.gradle.api.tasks.bundling.Zip; + +import javax.inject.Inject; + +import static org.elasticsearch.gradle.plugin.BasePluginBuildPlugin.BUNDLE_PLUGIN_TASK_NAME; +import static org.elasticsearch.gradle.plugin.BasePluginBuildPlugin.EXPLODED_BUNDLE_PLUGIN_TASK_NAME; + +/** + * @deprecated use {@link RestTestBasePlugin} instead + */ +@Deprecated +public class LegacyRestTestBasePlugin implements Plugin { + private static final String TESTS_REST_CLUSTER = "tests.rest.cluster"; + private static final String TESTS_CLUSTER = "tests.cluster"; + private static final String TESTS_CLUSTER_NAME = "tests.clustername"; + private static final String TESTS_CLUSTER_READINESS = "tests.cluster.readiness"; + + private static final String TESTS_CLUSTER_REMOTE_ACCESS = "tests.cluster.remote_access"; + + private ProviderFactory 
providerFactory; + + @Inject + public LegacyRestTestBasePlugin(ProviderFactory providerFactory) { + this.providerFactory = providerFactory; + } + + @Override + public void apply(Project project) { + project.getPluginManager().apply(ElasticsearchJavaBasePlugin.class); + project.getPluginManager().apply(ElasticsearchTestBasePlugin.class); + project.getPluginManager().apply(InternalTestClustersPlugin.class); + InternalPrecommitTasks.create(project, false); + project.getTasks().withType(RestIntegTestTask.class).configureEach(restIntegTestTask -> { + @SuppressWarnings("unchecked") + NamedDomainObjectContainer testClusters = (NamedDomainObjectContainer) project + .getExtensions() + .getByName(TestClustersPlugin.EXTENSION_NAME); + ElasticsearchCluster cluster = testClusters.maybeCreate(restIntegTestTask.getName()); + restIntegTestTask.useCluster(cluster); + restIntegTestTask.include("**/*IT.class"); + restIntegTestTask.systemProperty("tests.rest.load_packaged", Boolean.FALSE.toString()); + if (systemProperty(TESTS_REST_CLUSTER) == null) { + if (systemProperty(TESTS_CLUSTER) != null || systemProperty(TESTS_CLUSTER_NAME) != null) { + throw new IllegalArgumentException( + String.format("%s, %s, and %s must all be null or non-null", TESTS_REST_CLUSTER, TESTS_CLUSTER, TESTS_CLUSTER_NAME) + ); + } + SystemPropertyCommandLineArgumentProvider runnerNonInputProperties = + (SystemPropertyCommandLineArgumentProvider) restIntegTestTask.getExtensions().getByName("nonInputProperties"); + runnerNonInputProperties.systemProperty(TESTS_REST_CLUSTER, () -> String.join(",", cluster.getAllHttpSocketURI())); + runnerNonInputProperties.systemProperty(TESTS_CLUSTER, () -> String.join(",", cluster.getAllTransportPortURI())); + runnerNonInputProperties.systemProperty(TESTS_CLUSTER_NAME, cluster::getName); + runnerNonInputProperties.systemProperty(TESTS_CLUSTER_READINESS, () -> String.join(",", cluster.getAllReadinessPortURI())); + runnerNonInputProperties.systemProperty( + TESTS_CLUSTER_REMOTE_ACCESS, + () -> String.join(",", cluster.getAllRemoteAccessPortURI()) + ); + } else { + if (systemProperty(TESTS_CLUSTER) == null || systemProperty(TESTS_CLUSTER_NAME) == null) { + throw new IllegalArgumentException( + String.format("%s, %s, and %s must all be null or non-null", TESTS_REST_CLUSTER, TESTS_CLUSTER, TESTS_CLUSTER_NAME) + ); + } + } + }); + + project.getTasks() + .named(JavaBasePlugin.CHECK_TASK_NAME) + .configure(check -> check.dependsOn(project.getTasks().withType(RestIntegTestTask.class))); + project.getTasks() + .withType(StandaloneRestIntegTestTask.class) + .configureEach(t -> t.finalizedBy(project.getTasks().withType(FixtureStop.class))); + + project.getTasks().withType(StandaloneRestIntegTestTask.class).configureEach(t -> { + t.setMaxParallelForks(1); + // if this a module or plugin, it may have an associated zip file with it's contents, add that to the test cluster + project.getPluginManager().withPlugin("elasticsearch.esplugin", plugin -> { + if (GradleUtils.isModuleProject(project.getPath())) { + var bundle = project.getTasks().withType(Sync.class).named(EXPLODED_BUNDLE_PLUGIN_TASK_NAME); + t.getClusters().forEach(c -> c.module(bundle)); + } else { + var bundle = project.getTasks().withType(Zip.class).named(BUNDLE_PLUGIN_TASK_NAME); + t.getClusters().forEach(c -> c.plugin(bundle)); + } + }); + }); + } + + private String systemProperty(String propName) { + return providerFactory.systemProperty(propName).getOrNull(); + } +} diff --git 
a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/RestTestBasePlugin.java deleted file mode 100644 index fc27bfa43798..000000000000 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/RestTestBasePlugin.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.gradle.internal.test; - -import org.elasticsearch.gradle.internal.ElasticsearchJavaBasePlugin; -import org.elasticsearch.gradle.internal.ElasticsearchTestBasePlugin; -import org.elasticsearch.gradle.internal.FixtureStop; -import org.elasticsearch.gradle.internal.InternalTestClustersPlugin; -import org.elasticsearch.gradle.internal.precommit.InternalPrecommitTasks; -import org.elasticsearch.gradle.test.SystemPropertyCommandLineArgumentProvider; -import org.elasticsearch.gradle.testclusters.ElasticsearchCluster; -import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask; -import org.elasticsearch.gradle.testclusters.TestClustersPlugin; -import org.elasticsearch.gradle.util.GradleUtils; -import org.gradle.api.NamedDomainObjectContainer; -import org.gradle.api.Plugin; -import org.gradle.api.Project; -import org.gradle.api.plugins.JavaBasePlugin; -import org.gradle.api.provider.ProviderFactory; -import org.gradle.api.tasks.Sync; -import org.gradle.api.tasks.bundling.Zip; - -import javax.inject.Inject; - -import static org.elasticsearch.gradle.plugin.BasePluginBuildPlugin.BUNDLE_PLUGIN_TASK_NAME; -import static org.elasticsearch.gradle.plugin.BasePluginBuildPlugin.EXPLODED_BUNDLE_PLUGIN_TASK_NAME; - -public class RestTestBasePlugin implements Plugin { - private static final String TESTS_REST_CLUSTER = "tests.rest.cluster"; - private static final String TESTS_CLUSTER = "tests.cluster"; - private static final String TESTS_CLUSTER_NAME = "tests.clustername"; - private static final String TESTS_CLUSTER_READINESS = "tests.cluster.readiness"; - - private ProviderFactory providerFactory; - - @Inject - public RestTestBasePlugin(ProviderFactory providerFactory) { - this.providerFactory = providerFactory; - } - - @Override - public void apply(Project project) { - project.getPluginManager().apply(ElasticsearchJavaBasePlugin.class); - project.getPluginManager().apply(ElasticsearchTestBasePlugin.class); - project.getPluginManager().apply(InternalTestClustersPlugin.class); - InternalPrecommitTasks.create(project, false); - project.getTasks().withType(RestIntegTestTask.class).configureEach(restIntegTestTask -> { - @SuppressWarnings("unchecked") - NamedDomainObjectContainer testClusters = (NamedDomainObjectContainer) project - .getExtensions() - .getByName(TestClustersPlugin.EXTENSION_NAME); - ElasticsearchCluster cluster = testClusters.maybeCreate(restIntegTestTask.getName()); - restIntegTestTask.useCluster(cluster); - restIntegTestTask.include("**/*IT.class"); - restIntegTestTask.systemProperty("tests.rest.load_packaged", Boolean.FALSE.toString()); - if (systemProperty(TESTS_REST_CLUSTER) == null) { - if (systemProperty(TESTS_CLUSTER) != null || systemProperty(TESTS_CLUSTER_NAME) != null) { - throw new 
IllegalArgumentException( - String.format("%s, %s, and %s must all be null or non-null", TESTS_REST_CLUSTER, TESTS_CLUSTER, TESTS_CLUSTER_NAME) - ); - } - SystemPropertyCommandLineArgumentProvider runnerNonInputProperties = - (SystemPropertyCommandLineArgumentProvider) restIntegTestTask.getExtensions().getByName("nonInputProperties"); - runnerNonInputProperties.systemProperty(TESTS_REST_CLUSTER, () -> String.join(",", cluster.getAllHttpSocketURI())); - runnerNonInputProperties.systemProperty(TESTS_CLUSTER, () -> String.join(",", cluster.getAllTransportPortURI())); - runnerNonInputProperties.systemProperty(TESTS_CLUSTER_NAME, cluster::getName); - runnerNonInputProperties.systemProperty(TESTS_CLUSTER_READINESS, () -> String.join(",", cluster.getAllReadinessPortURI())); - } else { - if (systemProperty(TESTS_CLUSTER) == null || systemProperty(TESTS_CLUSTER_NAME) == null) { - throw new IllegalArgumentException( - String.format("%s, %s, and %s must all be null or non-null", TESTS_REST_CLUSTER, TESTS_CLUSTER, TESTS_CLUSTER_NAME) - ); - } - } - }); - - project.getTasks() - .named(JavaBasePlugin.CHECK_TASK_NAME) - .configure(check -> check.dependsOn(project.getTasks().withType(RestIntegTestTask.class))); - project.getTasks() - .withType(StandaloneRestIntegTestTask.class) - .configureEach(t -> t.finalizedBy(project.getTasks().withType(FixtureStop.class))); - - project.getTasks().withType(StandaloneRestIntegTestTask.class).configureEach(t -> - // if this a module or plugin, it may have an associated zip file with it's contents, add that to the test cluster - project.getPluginManager().withPlugin("elasticsearch.esplugin", plugin -> { - if (GradleUtils.isModuleProject(project.getPath())) { - var bundle = project.getTasks().withType(Sync.class).named(EXPLODED_BUNDLE_PLUGIN_TASK_NAME); - t.getClusters().forEach(c -> c.module(bundle)); - } else { - var bundle = project.getTasks().withType(Zip.class).named(BUNDLE_PLUGIN_TASK_NAME); - t.getClusters().forEach(c -> c.plugin(bundle)); - } - })); - } - - private String systemProperty(String propName) { - return providerFactory.systemProperty(propName).getOrNull(); - } -} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/StandaloneRestTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/StandaloneRestTestPlugin.java index 9ffaf396e7fb..163df6cc40e1 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/StandaloneRestTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/StandaloneRestTestPlugin.java @@ -11,8 +11,8 @@ import org.elasticsearch.gradle.internal.ExportElasticsearchBuildResourcesTask; import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.internal.precommit.InternalPrecommitTasks; -import org.elasticsearch.gradle.internal.test.rest.InternalJavaRestTestPlugin; -import org.elasticsearch.gradle.internal.test.rest.InternalYamlRestTestPlugin; +import org.elasticsearch.gradle.internal.test.rest.LegacyJavaRestTestPlugin; +import org.elasticsearch.gradle.internal.test.rest.LegacyYamlRestTestPlugin; import org.elasticsearch.gradle.internal.test.rest.RestTestUtil; import org.gradle.api.InvalidUserDataException; import org.gradle.api.Plugin; @@ -32,8 +32,8 @@ * and run REST tests. Use BuildPlugin if you want to build main code as well * as tests. 
* - * @deprecated use {@link InternalClusterTestPlugin}, {@link InternalJavaRestTestPlugin} or - * {@link InternalYamlRestTestPlugin} instead. + * @deprecated use {@link InternalClusterTestPlugin}, {@link LegacyJavaRestTestPlugin} or + * {@link LegacyYamlRestTestPlugin} instead. */ @Deprecated public class StandaloneRestTestPlugin implements Plugin { @@ -46,7 +46,7 @@ public void apply(final Project project) { } project.getRootProject().getPluginManager().apply(GlobalBuildInfoPlugin.class); - project.getPluginManager().apply(RestTestBasePlugin.class); + project.getPluginManager().apply(LegacyRestTestBasePlugin.class); project.getTasks().register("buildResources", ExportElasticsearchBuildResourcesTask.class); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithSslPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithSslPlugin.java index 9eb2efc77835..524f3dfedf95 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithSslPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/TestWithSslPlugin.java @@ -14,7 +14,7 @@ import org.elasticsearch.gradle.internal.precommit.FilePermissionsPrecommitPlugin; import org.elasticsearch.gradle.internal.precommit.ForbiddenPatternsPrecommitPlugin; import org.elasticsearch.gradle.internal.precommit.ForbiddenPatternsTask; -import org.elasticsearch.gradle.internal.test.rest.InternalJavaRestTestPlugin; +import org.elasticsearch.gradle.internal.test.rest.LegacyJavaRestTestPlugin; import org.elasticsearch.gradle.testclusters.ElasticsearchCluster; import org.elasticsearch.gradle.testclusters.TestClustersAware; import org.elasticsearch.gradle.testclusters.TestClustersPlugin; @@ -62,8 +62,8 @@ public void apply(Project project) { .withType(RestIntegTestTask.class) .configureEach(runner -> runner.systemProperty("tests.ssl.enabled", "true")); }); - project.getPlugins().withType(InternalJavaRestTestPlugin.class).configureEach(restTestPlugin -> { - SourceSet testSourceSet = Util.getJavaSourceSets(project).getByName(InternalJavaRestTestPlugin.SOURCE_SET_NAME); + project.getPlugins().withType(LegacyJavaRestTestPlugin.class).configureEach(restTestPlugin -> { + SourceSet testSourceSet = Util.getJavaSourceSets(project).getByName(LegacyJavaRestTestPlugin.SOURCE_SET_NAME); testSourceSet.getResources().srcDir(new File(keyStoreDir, "test/ssl")); project.getTasks().named(testSourceSet.getProcessResourcesTaskName()).configure(t -> t.dependsOn(exportKeyStore)); project.getTasks().withType(TestClustersAware.class).configureEach(clusterAware -> clusterAware.dependsOn(exportKeyStore)); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalJavaRestTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalJavaRestTestPlugin.java index 9dda731f7211..d18505ca4b11 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalJavaRestTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalJavaRestTestPlugin.java @@ -8,7 +8,7 @@ package org.elasticsearch.gradle.internal.test.rest; -import org.elasticsearch.gradle.internal.test.RestTestBasePlugin; +import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.Plugin; import org.gradle.api.Project; @@ -33,8 +33,11 @@ public 
void apply(Project project) { SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); SourceSet javaTestSourceSet = sourceSets.create(SOURCE_SET_NAME); + project.getDependencies().add(javaTestSourceSet.getImplementationConfigurationName(), project.project(":test:test-clusters")); + // setup the javaRestTest task - registerTestTask(project, javaTestSourceSet); + // we use a StandloneRestIntegTestTask here so that the conventions of RestTestBasePlugin don't create a test cluster + registerTestTask(project, javaTestSourceSet, SOURCE_SET_NAME, StandaloneRestIntegTestTask.class); // setup dependencies setupJavaRestTestDependenciesDefaults(project, javaTestSourceSet); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalYamlRestTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalYamlRestTestPlugin.java index dacf119994f4..b0fd142705a0 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalYamlRestTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalYamlRestTestPlugin.java @@ -8,7 +8,7 @@ package org.elasticsearch.gradle.internal.test.rest; -import org.elasticsearch.gradle.internal.test.RestTestBasePlugin; +import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.Plugin; import org.gradle.api.Project; @@ -34,10 +34,10 @@ public void apply(Project project) { SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); SourceSet yamlTestSourceSet = sourceSets.create(SOURCE_SET_NAME); - registerTestTask(project, yamlTestSourceSet); + registerTestTask(project, yamlTestSourceSet, SOURCE_SET_NAME, StandaloneRestIntegTestTask.class); // setup the dependencies - setupYamlRestTestDependenciesDefaults(project, yamlTestSourceSet); + setupYamlRestTestDependenciesDefaults(project, yamlTestSourceSet, true); // setup the copy for the rest resources project.getTasks().withType(CopyRestApiTask.class).configureEach(copyRestApiTask -> { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/LegacyJavaRestTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/LegacyJavaRestTestPlugin.java new file mode 100644 index 000000000000..a1b17c110b34 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/LegacyJavaRestTestPlugin.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle.internal.test.rest; + +import org.elasticsearch.gradle.internal.test.LegacyRestTestBasePlugin; +import org.elasticsearch.gradle.util.GradleUtils; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.tasks.SourceSet; +import org.gradle.api.tasks.SourceSetContainer; + +import static org.elasticsearch.gradle.internal.test.rest.RestTestUtil.registerTestTask; +import static org.elasticsearch.gradle.internal.test.rest.RestTestUtil.setupJavaRestTestDependenciesDefaults; + +/** + * Apply this plugin to run the Java based REST tests. + * + * @deprecated use {@link InternalJavaRestTestPlugin} + */ +@Deprecated +public class LegacyJavaRestTestPlugin implements Plugin { + + public static final String SOURCE_SET_NAME = "javaRestTest"; + + @Override + public void apply(Project project) { + project.getPluginManager().apply(LegacyRestTestBasePlugin.class); + + // create source set + SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); + SourceSet javaTestSourceSet = sourceSets.create(SOURCE_SET_NAME); + + // setup the javaRestTest task + registerTestTask(project, javaTestSourceSet); + + // setup dependencies + setupJavaRestTestDependenciesDefaults(project, javaTestSourceSet); + + // setup IDE + GradleUtils.setupIdeForTestSourceSet(project, javaTestSourceSet); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPlugin.java new file mode 100644 index 000000000000..4977c0924efb --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/LegacyYamlRestTestPlugin.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.test.rest; + +import org.elasticsearch.gradle.internal.test.LegacyRestTestBasePlugin; +import org.elasticsearch.gradle.util.GradleUtils; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.tasks.SourceSet; +import org.gradle.api.tasks.SourceSetContainer; + +import static org.elasticsearch.gradle.internal.test.rest.RestTestUtil.registerTestTask; +import static org.elasticsearch.gradle.internal.test.rest.RestTestUtil.setupYamlRestTestDependenciesDefaults; + +/** + * Apply this plugin to run the YAML based REST tests. 
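+ * <p>
+ * When applied, it creates a {@code yamlRestTest} source set, registers a test task for it, and wires
+ * the copied REST API specs and YAML tests into that source set's output, as set up in the body below.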
+ * + * @deprecated use {@link InternalYamlRestTestPlugin} + */ +@Deprecated +public class LegacyYamlRestTestPlugin implements Plugin { + + public static final String SOURCE_SET_NAME = "yamlRestTest"; + + @Override + public void apply(Project project) { + project.getPluginManager().apply(LegacyRestTestBasePlugin.class); + project.getPluginManager().apply(RestResourcesPlugin.class); + + // create source set + SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); + SourceSet yamlTestSourceSet = sourceSets.create(SOURCE_SET_NAME); + + registerTestTask(project, yamlTestSourceSet); + + // setup the dependencies + setupYamlRestTestDependenciesDefaults(project, yamlTestSourceSet); + + // setup the copy for the rest resources + project.getTasks().withType(CopyRestApiTask.class).configureEach(copyRestApiTask -> { + copyRestApiTask.setSourceResourceDir( + yamlTestSourceSet.getResources() + .getSrcDirs() + .stream() + .filter(f -> f.isDirectory() && f.getName().equals("resources")) + .findFirst() + .orElse(null) + ); + }); + + // Register rest resources with source set + yamlTestSourceSet.getOutput() + .dir( + project.getTasks() + .withType(CopyRestApiTask.class) + .named(RestResourcesPlugin.COPY_REST_API_SPECS_TASK) + .flatMap(CopyRestApiTask::getOutputResourceDir) + ); + + yamlTestSourceSet.getOutput() + .dir( + project.getTasks() + .withType(CopyRestTestsTask.class) + .named(RestResourcesPlugin.COPY_YAML_TESTS_TASK) + .flatMap(CopyRestTestsTask::getOutputResourceDir) + ); + + GradleUtils.setupIdeForTestSourceSet(project, yamlTestSourceSet); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesExtension.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesExtension.java index 8b08c23ac287..a107cfc51226 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesExtension.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesExtension.java @@ -27,11 +27,11 @@ public RestResourcesExtension(ObjectFactory objects) { restTests = new XpackRestResourcesSpec(objects); } - void restApi(Action spec) { + public void restApi(Action spec) { spec.execute(restApi); } - void restTests(Action spec) { + public void restTests(Action spec) { spec.execute(restTests); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesPlugin.java index e99ef646cdff..309480d9f600 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestResourcesPlugin.java @@ -11,6 +11,7 @@ import org.gradle.api.Project; import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.Dependency; +import org.gradle.api.attributes.Usage; import org.gradle.api.provider.Provider; import org.gradle.api.tasks.SourceSet; import org.gradle.api.tasks.SourceSetContainer; @@ -76,6 +77,9 @@ public class RestResourcesPlugin implements Plugin { public static final String COPY_YAML_TESTS_TASK = "copyYamlTestsTask"; public static final String COPY_REST_API_SPECS_TASK = "copyRestApiSpecsTask"; + public static final String YAML_TESTS_USAGE = "yaml-tests"; + public static final String YAML_XPACK_TESTS_USAGE = 
"yaml-xpack-tests"; + public static final String YAML_SPEC_USAGE = "yaml-spec"; private static final String EXTENSION_NAME = "restResources"; @Override @@ -86,8 +90,14 @@ public void apply(Project project) { SourceSet defaultSourceSet = sourceSets.maybeCreate(TEST_SOURCE_SET_NAME); // tests - Configuration testConfig = project.getConfigurations().create("restTestConfig"); - Configuration xpackTestConfig = project.getConfigurations().create("restXpackTestConfig"); + Configuration testConfig = project.getConfigurations().create("restTestConfig", config -> { + config.setCanBeConsumed(false); + config.getAttributes().attribute(Usage.USAGE_ATTRIBUTE, project.getObjects().named(Usage.class, YAML_TESTS_USAGE)); + }); + Configuration xpackTestConfig = project.getConfigurations().create("restXpackTestConfig", config -> { + config.setCanBeConsumed(false); + config.getAttributes().attribute(Usage.USAGE_ATTRIBUTE, project.getObjects().named(Usage.class, YAML_XPACK_TESTS_USAGE)); + }); // core // we guard this reference to :rest-api-spec with a find to make testing easier var restApiSpecProjectAvailable = project.findProject(":rest-api-spec") != null; @@ -104,8 +114,17 @@ public void apply(Project project) { .project(Map.of("path", ":x-pack:plugin", "configuration", "restXpackTests")); project.getDependencies().add(xpackTestConfig.getName(), restXPackTestdependency); } - project.getConfigurations().create("restTests"); - project.getConfigurations().create("restXpackTests"); + project.getConfigurations() + .create( + "restTests", + config -> config.getAttributes().attribute(Usage.USAGE_ATTRIBUTE, project.getObjects().named(Usage.class, YAML_TESTS_USAGE)) + ); + project.getConfigurations() + .create( + "restXpackTests", + config -> config.getAttributes() + .attribute(Usage.USAGE_ATTRIBUTE, project.getObjects().named(Usage.class, YAML_XPACK_TESTS_USAGE)) + ); Provider copyRestYamlTestTask = project.getTasks() .register(COPY_YAML_TESTS_TASK, CopyRestTestsTask.class, task -> { @@ -121,13 +140,24 @@ public void apply(Project project) { }); // api - Configuration specConfig = project.getConfigurations().create("restSpec"); // name chosen for passivity + Configuration specConfig = project.getConfigurations() + .create( + "restSpec", // name chosen for passivity + config -> { + config.setCanBeConsumed(false); + config.getAttributes().attribute(Usage.USAGE_ATTRIBUTE, project.getObjects().named(Usage.class, YAML_SPEC_USAGE)); + } + ); if (restApiSpecProjectAvailable) { Dependency restSpecDependency = project.getDependencies() .project(Map.of("path", ":rest-api-spec", "configuration", "restSpecs")); project.getDependencies().add(specConfig.getName(), restSpecDependency); } - project.getConfigurations().create("restSpecs"); + project.getConfigurations() + .create( + "restSpecs", + config -> config.getAttributes().attribute(Usage.USAGE_ATTRIBUTE, project.getObjects().named(Usage.class, YAML_SPEC_USAGE)) + ); Provider copyRestYamlApiTask = project.getTasks() .register(COPY_REST_API_SPECS_TASK, CopyRestApiTask.class, task -> { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java new file mode 100644 index 000000000000..1a7b5bc3ee2a --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java @@ -0,0 +1,319 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.test.rest; + +import groovy.lang.Closure; + +import org.elasticsearch.gradle.Architecture; +import org.elasticsearch.gradle.DistributionDownloadPlugin; +import org.elasticsearch.gradle.ElasticsearchDistribution; +import org.elasticsearch.gradle.ElasticsearchDistributionType; +import org.elasticsearch.gradle.Version; +import org.elasticsearch.gradle.VersionProperties; +import org.elasticsearch.gradle.distribution.ElasticsearchDistributionTypes; +import org.elasticsearch.gradle.internal.ElasticsearchJavaPlugin; +import org.elasticsearch.gradle.internal.InternalDistributionDownloadPlugin; +import org.elasticsearch.gradle.internal.info.BuildParams; +import org.elasticsearch.gradle.plugin.BasePluginBuildPlugin; +import org.elasticsearch.gradle.plugin.PluginBuildPlugin; +import org.elasticsearch.gradle.plugin.PluginPropertiesExtension; +import org.elasticsearch.gradle.test.SystemPropertyCommandLineArgumentProvider; +import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask; +import org.elasticsearch.gradle.transform.UnzipTransform; +import org.elasticsearch.gradle.util.GradleUtils; +import org.gradle.api.Action; +import org.gradle.api.NamedDomainObjectContainer; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.Task; +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.artifacts.Dependency; +import org.gradle.api.artifacts.ProjectDependency; +import org.gradle.api.artifacts.type.ArtifactTypeDefinition; +import org.gradle.api.file.FileTree; +import org.gradle.api.plugins.JavaBasePlugin; +import org.gradle.api.provider.ProviderFactory; +import org.gradle.api.tasks.ClasspathNormalizer; +import org.gradle.api.tasks.PathSensitivity; +import org.gradle.api.tasks.util.PatternFilterable; + +import java.util.Collection; +import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import javax.inject.Inject; + +/** + * Base plugin used for wiring up build tasks to REST testing tasks using new JUnit rule-based test clusters framework. 
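The RestTestBasePlugin added below passes extracted distribution and plugin paths to the forked test JVM as lazily resolved system properties (its SystemPropertyCommandLineArgumentProvider is internal to the Elasticsearch build). A minimal sketch of that pattern, assuming only the public Gradle API; the property key and plugin class name here are invented for illustration:

    import org.gradle.api.Plugin;
    import org.gradle.api.Project;
    import org.gradle.api.tasks.testing.Test;
    import org.gradle.process.CommandLineArgumentProvider;

    import java.util.List;
    import java.util.function.Supplier;

    // Sketch only: a lazily evaluated -D system property, resolved when the test JVM is forked,
    // so expensive values (such as an extracted distribution path) are not computed at configuration time.
    public class LazySystemPropertySketchPlugin implements Plugin<Project> {

        record LazySystemProperty(String key, Supplier<String> value) implements CommandLineArgumentProvider {
            @Override
            public Iterable<String> asArguments() {
                return List.of("-D" + key + "=" + value.get());
            }
        }

        @Override
        public void apply(Project project) {
            project.getTasks().withType(Test.class).configureEach(test ->
                test.getJvmArgumentProviders().add(
                    new LazySystemProperty(
                        "tests.example.distribution", // illustrative key, not one of the real system properties
                        () -> project.getLayout().getBuildDirectory().dir("distro").get().getAsFile().getPath()
                    )
                )
            );
        }
    }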
+ */ +public class RestTestBasePlugin implements Plugin { + + private static final String TESTS_RUNTIME_JAVA_SYSPROP = "tests.runtime.java"; + private static final String DEFAULT_DISTRIBUTION_SYSPROP = "tests.default.distribution"; + private static final String INTEG_TEST_DISTRIBUTION_SYSPROP = "tests.integ-test.distribution"; + private static final String BWC_SNAPSHOT_DISTRIBUTION_SYSPROP_PREFIX = "tests.snapshot.distribution."; + private static final String BWC_RELEASED_DISTRIBUTION_SYSPROP_PREFIX = "tests.release.distribution."; + private static final String TESTS_CLUSTER_MODULES_PATH_SYSPROP = "tests.cluster.modules.path"; + private static final String TESTS_CLUSTER_PLUGINS_PATH_SYSPROP = "tests.cluster.plugins.path"; + private static final String DEFAULT_REST_INTEG_TEST_DISTRO = "default_distro"; + private static final String INTEG_TEST_REST_INTEG_TEST_DISTRO = "integ_test_distro"; + private static final String MODULES_CONFIGURATION = "clusterModules"; + private static final String PLUGINS_CONFIGURATION = "clusterPlugins"; + private static final String EXTRACTED_PLUGINS_CONFIGURATION = "extractedPlugins"; + + private final ProviderFactory providerFactory; + + @Inject + public RestTestBasePlugin(ProviderFactory providerFactory) { + this.providerFactory = providerFactory; + } + + @Override + public void apply(Project project) { + project.getPluginManager().apply(ElasticsearchJavaPlugin.class); + project.getPluginManager().apply(InternalDistributionDownloadPlugin.class); + + // Register integ-test and default distributions + ElasticsearchDistribution defaultDistro = createDistribution( + project, + DEFAULT_REST_INTEG_TEST_DISTRO, + VersionProperties.getElasticsearch() + ); + ElasticsearchDistribution integTestDistro = createDistribution( + project, + INTEG_TEST_REST_INTEG_TEST_DISTRO, + VersionProperties.getElasticsearch(), + ElasticsearchDistributionTypes.INTEG_TEST_ZIP + ); + + // Create configures for module and plugin dependencies + Configuration modulesConfiguration = createPluginConfiguration(project, MODULES_CONFIGURATION, true, false); + Configuration pluginsConfiguration = createPluginConfiguration(project, PLUGINS_CONFIGURATION, false, false); + Configuration extractedPluginsConfiguration = createPluginConfiguration(project, EXTRACTED_PLUGINS_CONFIGURATION, true, true); + extractedPluginsConfiguration.extendsFrom(pluginsConfiguration); + configureArtifactTransforms(project); + + // For plugin and module projects, register the current project plugin bundle as a dependency + project.getPluginManager().withPlugin("elasticsearch.esplugin", plugin -> { + if (GradleUtils.isModuleProject(project.getPath())) { + project.getDependencies().add(MODULES_CONFIGURATION, getExplodedBundleDependency(project, project.getPath())); + } else { + project.getDependencies().add(PLUGINS_CONFIGURATION, getBundleZipTaskDependency(project, project.getPath())); + } + + }); + + project.getTasks().withType(StandaloneRestIntegTestTask.class).configureEach(task -> { + SystemPropertyCommandLineArgumentProvider nonInputSystemProperties = task.getExtensions() + .getByType(SystemPropertyCommandLineArgumentProvider.class); + + task.dependsOn(integTestDistro, modulesConfiguration); + registerDistributionInputs(task, integTestDistro); + + // Enable parallel execution for these tests since each test gets its own cluster + task.setMaxParallelForks(task.getProject().getGradle().getStartParameter().getMaxWorkerCount() / 2); + + // Disable test failure reporting since this stuff is now captured in build scans + 
task.getExtensions().getExtraProperties().set("dumpOutputOnFailure", false); + + // Disable the security manager and syscall filter since the test framework needs to fork processes + task.systemProperty("tests.security.manager", "false"); + task.systemProperty("tests.system_call_filter", "false"); + + // Register plugins and modules as task inputs and pass paths as system properties to tests + nonInputSystemProperties.systemProperty(TESTS_CLUSTER_MODULES_PATH_SYSPROP, modulesConfiguration::getAsPath); + registerConfigurationInputs(task, modulesConfiguration); + nonInputSystemProperties.systemProperty(TESTS_CLUSTER_PLUGINS_PATH_SYSPROP, pluginsConfiguration::getAsPath); + registerConfigurationInputs(task, extractedPluginsConfiguration); + + // Wire up integ-test distribution by default for all test tasks + nonInputSystemProperties.systemProperty( + INTEG_TEST_DISTRIBUTION_SYSPROP, + () -> integTestDistro.getExtracted().getSingleFile().getPath() + ); + nonInputSystemProperties.systemProperty(TESTS_RUNTIME_JAVA_SYSPROP, BuildParams.getRuntimeJavaHome()); + + // Add `usesDefaultDistribution()` extension method to test tasks to indicate they require the default distro + task.getExtensions().getExtraProperties().set("usesDefaultDistribution", new Closure(task) { + @Override + public Void call(Object... args) { + task.dependsOn(defaultDistro); + registerDistributionInputs(task, defaultDistro); + + nonInputSystemProperties.systemProperty( + DEFAULT_DISTRIBUTION_SYSPROP, + providerFactory.provider(() -> defaultDistro.getExtracted().getSingleFile().getPath()) + ); + return null; + } + }); + + // Add `usesBwcDistribution(version)` extension method to test tasks to indicate they require a BWC distribution + task.getExtensions().getExtraProperties().set("usesBwcDistribution", new Closure(task) { + @Override + public Void call(Object... args) { + if (args.length != 1 && args[0] instanceof Version == false) { + throw new IllegalArgumentException("Expected exactly one argument of type org.elasticsearch.gradle.Version"); + } + + Version version = (Version) args[0]; + boolean isReleased = BuildParams.getBwcVersions().unreleasedInfo(version) == null; + String versionString = version.toString(); + ElasticsearchDistribution bwcDistro = createDistribution(project, "bwc_" + versionString, versionString); + + task.dependsOn(bwcDistro); + registerDistributionInputs(task, bwcDistro); + + nonInputSystemProperties.systemProperty( + (isReleased ? 
BWC_RELEASED_DISTRIBUTION_SYSPROP_PREFIX : BWC_SNAPSHOT_DISTRIBUTION_SYSPROP_PREFIX) + versionString, + providerFactory.provider(() -> bwcDistro.getExtracted().getSingleFile().getPath()) + ); + + if (version.before(BuildParams.getBwcVersions().getMinimumWireCompatibleVersion())) { + // If we are upgrade testing older versions we also need to upgrade to 7.last + this.call(BuildParams.getBwcVersions().getMinimumWireCompatibleVersion()); + } + return null; + } + }); + }); + + project.getTasks() + .named(JavaBasePlugin.CHECK_TASK_NAME) + .configure(check -> check.dependsOn(project.getTasks().withType(StandaloneRestIntegTestTask.class))); + } + + private ElasticsearchDistribution createDistribution(Project project, String name, String version) { + return createDistribution(project, name, version, null); + } + + private ElasticsearchDistribution createDistribution(Project project, String name, String version, ElasticsearchDistributionType type) { + NamedDomainObjectContainer distributions = DistributionDownloadPlugin.getContainer(project); + ElasticsearchDistribution maybeDistro = distributions.findByName(name); + if (maybeDistro == null) { + return distributions.create(name, distro -> { + distro.setVersion(version); + distro.setArchitecture(Architecture.current()); + if (type != null) { + distro.setType(type); + } + }); + } else { + return maybeDistro; + } + } + + private FileTree getDistributionFiles(ElasticsearchDistribution distribution, Action patternFilter) { + return distribution.getExtracted().getAsFileTree().matching(patternFilter); + } + + private void registerConfigurationInputs(Task task, Configuration configuration) { + task.getInputs() + .files(providerFactory.provider(() -> configuration.getAsFileTree().filter(f -> f.getName().endsWith(".jar") == false))) + .withPropertyName(configuration.getName() + "-files") + .withPathSensitivity(PathSensitivity.RELATIVE); + + task.getInputs() + .files(providerFactory.provider(() -> configuration.getAsFileTree().filter(f -> f.getName().endsWith(".jar")))) + .withPropertyName(configuration.getName() + "-classpath") + .withNormalizer(ClasspathNormalizer.class); + } + + private void registerDistributionInputs(Task task, ElasticsearchDistribution distribution) { + task.getInputs() + .files(providerFactory.provider(() -> getDistributionFiles(distribution, filter -> filter.exclude("**/*.jar")))) + .withPropertyName(distribution.getName() + "-files") + .withPathSensitivity(PathSensitivity.RELATIVE); + + task.getInputs() + .files(providerFactory.provider(() -> getDistributionFiles(distribution, filter -> filter.include("**/*.jar")))) + .withPropertyName(distribution.getName() + "-classpath") + .withNormalizer(ClasspathNormalizer.class); + } + + private Optional findModulePath(Project project, String pluginName) { + return project.getRootProject() + .getAllprojects() + .stream() + .filter(p -> GradleUtils.isModuleProject(p.getPath())) + .filter(p -> p.getPlugins().hasPlugin(PluginBuildPlugin.class)) + .filter(p -> p.getExtensions().getByType(PluginPropertiesExtension.class).getName().equals(pluginName)) + .findFirst() + .map(Project::getPath); + } + + private Configuration createPluginConfiguration(Project project, String name, boolean useExploded, boolean isExtended) { + return project.getConfigurations().create(name, c -> { + if (useExploded) { + c.attributes(a -> a.attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE)); + } else { + c.attributes(a -> 
a.attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.ZIP_TYPE)); + } + if (isExtended == false) { + c.withDependencies(dependencies -> { + // Add dependencies of any modules + Collection additionalDependencies = new LinkedHashSet<>(); + for (Iterator iterator = dependencies.iterator(); iterator.hasNext();) { + Dependency dependency = iterator.next(); + if (dependency instanceof ProjectDependency projectDependency) { + Project dependencyProject = projectDependency.getDependencyProject(); + List extendedPlugins = dependencyProject.getExtensions() + .getByType(PluginPropertiesExtension.class) + .getExtendedPlugins(); + + // Replace project dependency with explicit dependency on exploded configuration to workaround variant bug + if (projectDependency.getTargetConfiguration() == null) { + iterator.remove(); + additionalDependencies.add( + useExploded + ? getExplodedBundleDependency(project, dependencyProject.getPath()) + : getBundleZipTaskDependency(project, dependencyProject.getPath()) + ); + } + + for (String extendedPlugin : extendedPlugins) { + findModulePath(project, extendedPlugin).ifPresent( + modulePath -> additionalDependencies.add( + useExploded + ? getExplodedBundleDependency(project, modulePath) + : getBundleZipTaskDependency(project, modulePath) + ) + ); + } + } + } + + dependencies.addAll(additionalDependencies); + }); + } + }); + } + + private Dependency getExplodedBundleDependency(Project project, String projectPath) { + return project.getDependencies() + .project(Map.of("path", projectPath, "configuration", BasePluginBuildPlugin.EXPLODED_BUNDLE_CONFIG)); + } + + private Dependency getBundleZipTaskDependency(Project project, String projectPath) { + Project dependencyProject = project.findProject(projectPath); + return project.getDependencies() + .create(project.files(dependencyProject.getTasks().named(BasePluginBuildPlugin.BUNDLE_PLUGIN_TASK_NAME))); + } + + private void configureArtifactTransforms(Project project) { + project.getDependencies().registerTransform(UnzipTransform.class, transformSpec -> { + transformSpec.getFrom().attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.ZIP_TYPE); + transformSpec.getTo().attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE); + transformSpec.getParameters().setAsFiletreeOutput(false); + }); + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestUtil.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestUtil.java index 922157333d80..99c25b9e2570 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestUtil.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestUtil.java @@ -15,6 +15,7 @@ import org.gradle.api.provider.Provider; import org.gradle.api.tasks.SourceSet; import org.gradle.api.tasks.TaskProvider; +import org.gradle.api.tasks.testing.Test; /** * Utility class to configure the necessary tasks and dependencies. 
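For context on the configureArtifactTransforms call above: artifact transforms let Gradle convert artifacts of one type attribute into another on demand, which is how zipped plugin bundles become exploded directories for the cluster configurations. A self-contained sketch of a zip-to-directory transform and its registration, assuming only the public Gradle API (the real build uses its own UnzipTransform, so this is illustrative only):

    import org.gradle.api.Plugin;
    import org.gradle.api.Project;
    import org.gradle.api.artifacts.transform.InputArtifact;
    import org.gradle.api.artifacts.transform.TransformAction;
    import org.gradle.api.artifacts.transform.TransformOutputs;
    import org.gradle.api.artifacts.transform.TransformParameters;
    import org.gradle.api.artifacts.type.ArtifactTypeDefinition;
    import org.gradle.api.file.FileSystemLocation;
    import org.gradle.api.provider.Provider;

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.UncheckedIOException;
    import java.nio.file.Files;
    import java.util.zip.ZipEntry;
    import java.util.zip.ZipInputStream;

    public class ZipToDirectorySketchPlugin implements Plugin<Project> {

        // Unpacks a "zip" artifact into a "directory" artifact; consumers requesting the directory
        // type trigger the transform automatically during dependency resolution.
        public abstract static class SimpleUnzipTransform implements TransformAction<TransformParameters.None> {
            @InputArtifact
            public abstract Provider<FileSystemLocation> getInputArtifact();

            @Override
            public void transform(TransformOutputs outputs) {
                File zip = getInputArtifact().get().getAsFile();
                File targetDir = outputs.dir(zip.getName().replaceAll("\\.zip$", ""));
                try (ZipInputStream in = new ZipInputStream(new FileInputStream(zip))) {
                    ZipEntry entry;
                    while ((entry = in.getNextEntry()) != null) {
                        File out = new File(targetDir, entry.getName());
                        if (entry.isDirectory()) {
                            out.mkdirs();
                        } else {
                            out.getParentFile().mkdirs();
                            Files.copy(in, out.toPath());
                        }
                    }
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            }
        }

        @Override
        public void apply(Project project) {
            project.getDependencies().registerTransform(SimpleUnzipTransform.class, spec -> {
                spec.getFrom().attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.ZIP_TYPE);
                spec.getTo().attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE);
            });
        }
    }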
@@ -34,8 +35,17 @@ public static Provider registerTestTask(Project project, Sour * Creates a {@link RestIntegTestTask} task with a custom name for the provided source set */ public static TaskProvider registerTestTask(Project project, SourceSet sourceSet, String taskName) { + return registerTestTask(project, sourceSet, taskName, RestIntegTestTask.class); + } + + /** + * Creates a {@link T} task with a custom name for the provided source set + * + * @param test task type + */ + public static TaskProvider registerTestTask(Project project, SourceSet sourceSet, String taskName, Class clazz) { // lazily create the test task - return project.getTasks().register(taskName, RestIntegTestTask.class, testTask -> { + return project.getTasks().register(taskName, clazz, testTask -> { testTask.setGroup(JavaBasePlugin.VERIFICATION_GROUP); testTask.setDescription("Runs the REST tests against an external cluster"); project.getPlugins().withType(JavaPlugin.class, t -> testTask.mustRunAfter(project.getTasks().named("test"))); @@ -49,10 +59,20 @@ public static TaskProvider registerTestTask(Project project, * Setup the dependencies needed for the YAML REST tests. */ public static void setupYamlRestTestDependenciesDefaults(Project project, SourceSet sourceSet) { + setupYamlRestTestDependenciesDefaults(project, sourceSet, false); + } + + /** + * Setup the dependencies needed for the YAML REST tests. + */ + public static void setupYamlRestTestDependenciesDefaults(Project project, SourceSet sourceSet, boolean useNewTestClusters) { Project yamlTestRunnerProject = project.findProject(":test:yaml-rest-runner"); // we shield the project dependency to make integration tests easier if (yamlTestRunnerProject != null) { project.getDependencies().add(sourceSet.getImplementationConfigurationName(), yamlTestRunnerProject); + if (useNewTestClusters) { + project.getDependencies().add(sourceSet.getImplementationConfigurationName(), project.project(":test:test-clusters")); + } } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java new file mode 100644 index 000000000000..273a0a379318 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/AbstractYamlRestCompatTestPlugin.java @@ -0,0 +1,272 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
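The RestTestUtil change above parameterises test task registration over the concrete task class. A stripped-down sketch of the same idea using only Gradle's public types; the description string mirrors the one in the diff, while RestIntegTestTask and StandaloneRestIntegTestTask themselves live in the Elasticsearch build:

    import org.gradle.api.Project;
    import org.gradle.api.plugins.JavaBasePlugin;
    import org.gradle.api.tasks.TaskProvider;
    import org.gradle.api.tasks.testing.Test;

    public final class TypedTestTaskRegistration {

        private TypedTestTaskRegistration() {}

        // The caller supplies the task class, so one helper can lazily register either the legacy
        // RestIntegTestTask or the new StandaloneRestIntegTestTask without duplicating the wiring.
        public static <T extends Test> TaskProvider<T> register(Project project, String taskName, Class<T> clazz) {
            return project.getTasks().register(taskName, clazz, testTask -> {
                testTask.setGroup(JavaBasePlugin.VERIFICATION_GROUP);
                testTask.setDescription("Runs the REST tests against an external cluster");
            });
        }
    }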
+ */ + +package org.elasticsearch.gradle.internal.test.rest.compat.compat; + +import org.elasticsearch.gradle.Version; +import org.elasticsearch.gradle.VersionProperties; +import org.elasticsearch.gradle.internal.ElasticsearchJavaBasePlugin; +import org.elasticsearch.gradle.internal.test.LegacyRestTestBasePlugin; +import org.elasticsearch.gradle.internal.test.rest.CopyRestApiTask; +import org.elasticsearch.gradle.internal.test.rest.CopyRestTestsTask; +import org.elasticsearch.gradle.internal.test.rest.LegacyYamlRestTestPlugin; +import org.elasticsearch.gradle.internal.test.rest.RestResourcesExtension; +import org.elasticsearch.gradle.internal.test.rest.RestResourcesPlugin; +import org.elasticsearch.gradle.testclusters.TestClustersPlugin; +import org.elasticsearch.gradle.util.GradleUtils; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.Task; +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.artifacts.Dependency; +import org.gradle.api.file.Directory; +import org.gradle.api.file.ProjectLayout; +import org.gradle.api.file.RelativePath; +import org.gradle.api.internal.file.FileOperations; +import org.gradle.api.plugins.ExtraPropertiesExtension; +import org.gradle.api.plugins.JavaBasePlugin; +import org.gradle.api.provider.Provider; +import org.gradle.api.tasks.SourceSet; +import org.gradle.api.tasks.SourceSetContainer; +import org.gradle.api.tasks.Sync; +import org.gradle.api.tasks.TaskProvider; +import org.gradle.api.tasks.testing.Test; + +import java.io.File; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.Map; + +import javax.inject.Inject; + +import static org.elasticsearch.gradle.internal.test.rest.RestTestUtil.setupYamlRestTestDependenciesDefaults; + +/** + * Apply this plugin to run the YAML based REST tests from a prior major version against this version's cluster. 
+ */ +public abstract class AbstractYamlRestCompatTestPlugin implements Plugin { + public static final String BWC_MINOR_CONFIG_NAME = "bwcMinor"; + private static final String REST_COMPAT_CHECK_TASK_NAME = "checkRestCompat"; + private static final String COMPATIBILITY_APIS_CONFIGURATION = "restCompatSpecs"; + private static final String COMPATIBILITY_TESTS_CONFIGURATION = "restCompatTests"; + private static final Path RELATIVE_API_PATH = Path.of("rest-api-spec/api"); + private static final Path RELATIVE_TEST_PATH = Path.of("rest-api-spec/test"); + private static final Path RELATIVE_REST_API_RESOURCES = Path.of("rest-api-spec/src/main/resources"); + private static final Path RELATIVE_REST_CORE = Path.of("rest-api-spec"); + private static final Path RELATIVE_REST_XPACK = Path.of("x-pack/plugin"); + private static final Path RELATIVE_REST_PROJECT_RESOURCES = Path.of("src/yamlRestTest/resources"); + private static final int COMPATIBLE_VERSION = Version.fromString(VersionProperties.getVersions().get("elasticsearch")).getMajor() - 1; + private static final String SOURCE_SET_NAME = "yamlRestTestV" + COMPATIBLE_VERSION + "Compat"; + private ProjectLayout projectLayout; + private FileOperations fileOperations; + + @Inject + public AbstractYamlRestCompatTestPlugin(ProjectLayout projectLayout, FileOperations fileOperations) { + this.projectLayout = projectLayout; + this.fileOperations = fileOperations; + } + + @Override + public void apply(Project project) { + + final Path compatRestResourcesDir = Path.of("restResources").resolve("v" + COMPATIBLE_VERSION); + final Path compatSpecsDir = compatRestResourcesDir.resolve("yamlSpecs"); + final Path compatTestsDir = compatRestResourcesDir.resolve("yamlTests"); + + project.getPluginManager().apply(ElasticsearchJavaBasePlugin.class); + project.getPluginManager().apply(TestClustersPlugin.class); + project.getPluginManager().apply(LegacyRestTestBasePlugin.class); + project.getPluginManager().apply(RestResourcesPlugin.class); + project.getPluginManager().apply(getBasePlugin()); + + RestResourcesExtension extension = project.getExtensions().getByType(RestResourcesExtension.class); + + // create source set + SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); + SourceSet yamlCompatTestSourceSet = sourceSets.create(SOURCE_SET_NAME); + SourceSet yamlTestSourceSet = sourceSets.getByName(LegacyYamlRestTestPlugin.SOURCE_SET_NAME); + GradleUtils.extendSourceSet(project, LegacyYamlRestTestPlugin.SOURCE_SET_NAME, SOURCE_SET_NAME); + + // copy compatible rest specs + Configuration bwcMinorConfig = project.getConfigurations().create(BWC_MINOR_CONFIG_NAME); + Dependency bwcMinor = project.getDependencies() + .project(Map.of("path", ":distribution:bwc:maintenance", "configuration", "checkout")); + project.getDependencies().add(bwcMinorConfig.getName(), bwcMinor); + + String projectPath = project.getPath(); + ExtraPropertiesExtension extraProperties = project.getExtensions().getExtraProperties(); + Provider copyCompatYamlSpecTask = project.getTasks() + .register("copyRestCompatApiTask", CopyRestApiTask.class, task -> { + task.dependsOn(bwcMinorConfig); + task.setConfig(bwcMinorConfig); + task.setAdditionalConfig(bwcMinorConfig); + task.getInclude().set(extension.getRestApi().getInclude()); + task.getOutputResourceDir().set(projectLayout.getBuildDirectory().dir(compatSpecsDir.toString())); + task.setSourceResourceDir( + yamlCompatTestSourceSet.getResources() + .getSrcDirs() + .stream() + .filter(f -> f.isDirectory() && 
f.getName().equals("resources")) + .findFirst() + .orElse(null) + ); + task.setSkipHasRestTestCheck(true); + task.setConfigToFileTree( + config -> fileOperations.fileTree( + config.getSingleFile().toPath().resolve(RELATIVE_REST_API_RESOURCES).resolve(RELATIVE_API_PATH) + ) + ); + task.setAdditionalConfigToFileTree( + config -> fileOperations.fileTree( + getCompatProjectPath(projectPath, config.getSingleFile().toPath()).resolve(RELATIVE_REST_PROJECT_RESOURCES) + .resolve(RELATIVE_API_PATH) + ) + ); + task.onlyIf(t -> isEnabled(extraProperties)); + }); + + // copy compatible rest tests + Provider copyCompatYamlTestTask = project.getTasks() + .register("copyRestCompatTestTask", CopyRestTestsTask.class, task -> { + task.dependsOn(bwcMinorConfig); + task.setCoreConfig(bwcMinorConfig); + task.setXpackConfig(bwcMinorConfig); + task.setAdditionalConfig(bwcMinorConfig); + task.getIncludeCore().set(extension.getRestTests().getIncludeCore()); + task.getIncludeXpack().set(extension.getRestTests().getIncludeXpack()); + task.getOutputResourceDir().set(projectLayout.getBuildDirectory().dir(compatTestsDir.resolve("original").toString())); + task.setCoreConfigToFileTree( + config -> fileOperations.fileTree( + config.getSingleFile() + .toPath() + .resolve(RELATIVE_REST_CORE) + .resolve(RELATIVE_REST_PROJECT_RESOURCES) + .resolve(RELATIVE_TEST_PATH) + ) + ); + task.setXpackConfigToFileTree( + config -> fileOperations.fileTree( + config.getSingleFile() + .toPath() + .resolve(RELATIVE_REST_XPACK) + .resolve(RELATIVE_REST_PROJECT_RESOURCES) + .resolve(RELATIVE_TEST_PATH) + ) + ); + task.setAdditionalConfigToFileTree( + config -> fileOperations.fileTree( + getCompatProjectPath(projectPath, config.getSingleFile().toPath()).resolve(RELATIVE_REST_PROJECT_RESOURCES) + .resolve(RELATIVE_TEST_PATH) + ) + ); + task.dependsOn(copyCompatYamlSpecTask); + task.onlyIf(t -> isEnabled(extraProperties)); + }); + + // copy both local source set apis and compat apis to a single location to be exported as an artifact + TaskProvider bundleRestCompatApis = project.getTasks().register("bundleRestCompatApis", Sync.class, task -> { + task.setDestinationDir(projectLayout.getBuildDirectory().dir("bundledCompatApis").get().getAsFile()); + task.setIncludeEmptyDirs(false); + task.from(copyCompatYamlSpecTask.flatMap(t -> t.getOutputResourceDir().map(d -> d.dir(RELATIVE_API_PATH.toString())))); + task.from(yamlCompatTestSourceSet.getProcessResourcesTaskName(), s -> { + s.include(RELATIVE_API_PATH + "/*"); + s.eachFile( + details -> details.setRelativePath( + new RelativePath(true, Arrays.stream(details.getRelativePath().getSegments()).skip(2).toArray(String[]::new)) + ) + ); + }); + }); + + // transform the copied tests task + TaskProvider transformCompatTestTask = project.getTasks() + .register("yamlRestTestV" + COMPATIBLE_VERSION + "CompatTransform", RestCompatTestTransformTask.class, task -> { + task.getSourceDirectory().set(copyCompatYamlTestTask.flatMap(CopyRestTestsTask::getOutputResourceDir)); + task.getOutputDirectory() + .set(project.getLayout().getBuildDirectory().dir(compatTestsDir.resolve("transformed").toString())); + task.onlyIf(t -> isEnabled(extraProperties)); + }); + + // Register compat rest resources with source set + yamlCompatTestSourceSet.getOutput().dir(copyCompatYamlSpecTask.map(CopyRestApiTask::getOutputResourceDir)); + yamlCompatTestSourceSet.getOutput().dir(transformCompatTestTask.map(RestCompatTestTransformTask::getOutputDirectory)); + + // Register artifact for transformed compatibility apis and tests + 
Configuration compatRestSpecs = project.getConfigurations().create(COMPATIBILITY_APIS_CONFIGURATION); + Configuration compatRestTests = project.getConfigurations().create(COMPATIBILITY_TESTS_CONFIGURATION); + project.getArtifacts().add(compatRestSpecs.getName(), bundleRestCompatApis.map(Sync::getDestinationDir)); + project.getArtifacts() + .add( + compatRestTests.getName(), + transformCompatTestTask.flatMap(t -> t.getOutputDirectory().dir(RELATIVE_TEST_PATH.toString())) + ); + + // Grab the original rest resources locations so we can omit them from the compatibility testing classpath down below + Provider originalYamlSpecsDir = project.getTasks() + .withType(CopyRestApiTask.class) + .named(RestResourcesPlugin.COPY_REST_API_SPECS_TASK) + .flatMap(CopyRestApiTask::getOutputResourceDir); + Provider originalYamlTestsDir = project.getTasks() + .withType(CopyRestTestsTask.class) + .named(RestResourcesPlugin.COPY_YAML_TESTS_TASK) + .flatMap(CopyRestTestsTask::getOutputResourceDir); + + // setup the test task + TaskProvider yamlRestCompatTestTask = registerTestTask(project, yamlCompatTestSourceSet); + yamlRestCompatTestTask.configure(testTask -> { + testTask.systemProperty("tests.restCompat", true); + // Use test runner and classpath from "normal" yaml source set + testTask.setTestClassesDirs( + yamlTestSourceSet.getOutput().getClassesDirs().plus(yamlCompatTestSourceSet.getOutput().getClassesDirs()) + ); + testTask.setClasspath( + yamlCompatTestSourceSet.getRuntimeClasspath() + // remove the "normal" api and tests + .minus(project.files(yamlTestSourceSet.getOutput().getResourcesDir())) + .minus(project.files(originalYamlSpecsDir)) + .minus(project.files(originalYamlTestsDir)) + ); + + // run compatibility tests after "normal" tests + testTask.mustRunAfter(project.getTasks().named(LegacyYamlRestTestPlugin.SOURCE_SET_NAME)); + testTask.onlyIf(t -> isEnabled(extraProperties)); + }); + + setupYamlRestTestDependenciesDefaults(project, yamlCompatTestSourceSet, true); + + // setup IDE + GradleUtils.setupIdeForTestSourceSet(project, yamlCompatTestSourceSet); + + // add a lifecycle task to allow for a possible future additional rest compatibility without needing to change task names + TaskProvider checkRestCompatTask = project.getTasks().register(REST_COMPAT_CHECK_TASK_NAME, (thisCheckTask) -> { + thisCheckTask.setDescription("Runs all REST compatibility checks."); + thisCheckTask.setGroup("verification"); + }); + + // wire the lifecycle task into the main check task + project.getTasks().named(JavaBasePlugin.CHECK_TASK_NAME).configure(check -> check.dependsOn(checkRestCompatTask)); + + // wire the yamlRestCompatTest into the custom lifecycle task + project.getTasks().named(REST_COMPAT_CHECK_TASK_NAME).configure(check -> check.dependsOn(yamlRestCompatTestTask)); + + } + + public abstract TaskProvider registerTestTask(Project project, SourceSet sourceSet); + + public abstract Class> getBasePlugin(); + + private boolean isEnabled(ExtraPropertiesExtension extraProperties) { + Object bwcEnabled = extraProperties.getProperties().get("bwc_tests_enabled"); + return bwcEnabled == null || (Boolean) bwcEnabled; + } + + // TODO: implement custom extension that allows us move around of the projects between major versions and still find them + private Path getCompatProjectPath(String projectPath, Path checkoutDir) { + return checkoutDir.resolve(projectPath.replaceFirst(":", "").replace(":", File.separator)); + } +} diff --git 
a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/LegacyYamlRestCompatTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/LegacyYamlRestCompatTestPlugin.java new file mode 100644 index 000000000000..e84c84cc426a --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/LegacyYamlRestCompatTestPlugin.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.test.rest.compat.compat; + +import org.elasticsearch.gradle.internal.test.rest.LegacyYamlRestTestPlugin; +import org.elasticsearch.gradle.internal.test.rest.RestTestUtil; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.file.ProjectLayout; +import org.gradle.api.internal.file.FileOperations; +import org.gradle.api.tasks.SourceSet; +import org.gradle.api.tasks.TaskProvider; +import org.gradle.api.tasks.testing.Test; + +import javax.inject.Inject; + +/** + * Apply this plugin to run the YAML based REST tests from a prior major version against this version's cluster. + * + * @deprecated use {@link YamlRestCompatTestPlugin} + */ +@Deprecated +public class LegacyYamlRestCompatTestPlugin extends AbstractYamlRestCompatTestPlugin { + @Inject + public LegacyYamlRestCompatTestPlugin(ProjectLayout projectLayout, FileOperations fileOperations) { + super(projectLayout, fileOperations); + } + + @Override + public TaskProvider registerTestTask(Project project, SourceSet sourceSet) { + return RestTestUtil.registerTestTask(project, sourceSet, sourceSet.getTaskName(null, "test")); + } + + @Override + public Class> getBasePlugin() { + return LegacyYamlRestTestPlugin.class; + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/RestCompatTestTransformTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java similarity index 99% rename from build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/RestCompatTestTransformTask.java rename to build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java index bfb53c23b5f1..eee1c4c21eb0 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/RestCompatTestTransformTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java @@ -6,7 +6,7 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.gradle.internal.rest.compat; +package org.elasticsearch.gradle.internal.test.rest.compat.compat; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/YamlRestCompatTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/YamlRestCompatTestPlugin.java new file mode 100644 index 000000000000..79588ca722ff --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/YamlRestCompatTestPlugin.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.test.rest.compat.compat; + +import org.elasticsearch.gradle.internal.test.rest.InternalYamlRestTestPlugin; +import org.elasticsearch.gradle.internal.test.rest.RestTestUtil; +import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.file.ProjectLayout; +import org.gradle.api.internal.file.FileOperations; +import org.gradle.api.tasks.SourceSet; +import org.gradle.api.tasks.TaskProvider; +import org.gradle.api.tasks.testing.Test; + +import javax.inject.Inject; + +/** + * Apply this plugin to run the YAML based REST tests from a prior major version against this version's cluster. 
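The compatibility plugin above publishes its bundled specs and transformed tests through the restCompatSpecs and restCompatTests configurations. Purely as a hedged illustration (no such consumer is part of this change), another project could resolve those artifacts roughly like this; the ":producer" project path is a placeholder:

    import org.gradle.api.Plugin;
    import org.gradle.api.Project;
    import org.gradle.api.artifacts.Configuration;

    import java.util.Map;

    public class CompatSpecsConsumerSketchPlugin implements Plugin<Project> {
        @Override
        public void apply(Project project) {
            // resolvable-only bucket for the incoming compatibility specs
            Configuration incomingCompatSpecs = project.getConfigurations().create("incomingCompatSpecs", c -> {
                c.setCanBeConsumed(false);
                c.setCanBeResolved(true);
            });
            project.getDependencies().add(
                incomingCompatSpecs.getName(),
                project.getDependencies().project(Map.of("path", ":producer", "configuration", "restCompatSpecs"))
            );
        }
    }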
+ */ +public class YamlRestCompatTestPlugin extends AbstractYamlRestCompatTestPlugin { + @Inject + public YamlRestCompatTestPlugin(ProjectLayout projectLayout, FileOperations fileOperations) { + super(projectLayout, fileOperations); + } + + @Override + public TaskProvider registerTestTask(Project project, SourceSet sourceSet) { + return RestTestUtil.registerTestTask(project, sourceSet, sourceSet.getTaskName(null, "test"), StandaloneRestIntegTestTask.class); + } + + @Override + public Class> getBasePlugin() { + return InternalYamlRestTestPlugin.class; + } +} diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java index 384774190207..3d59cf337e87 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/testfixtures/TestFixturesPlugin.java @@ -117,6 +117,7 @@ public void execute(Task task) { maybeSkipTask(dockerSupport, buildFixture); ComposeExtension composeExtension = project.getExtensions().getByType(ComposeExtension.class); + composeExtension.setProjectName(project.getName()); composeExtension.getUseComposeFiles().addAll(Collections.singletonList(DOCKER_COMPOSE_YML)); composeExtension.getRemoveContainers().set(true); composeExtension.getCaptureContainersOutput() diff --git a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt index 729907f9e42a..64f89d6c57ed 100644 --- a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt +++ b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt @@ -139,7 +139,9 @@ org.apache.logging.log4j.LogManager#getLogger() # This is permitted in test code, where we have a Checkstyle rule to guard # against unsafe uses. This leniency does not extend to server code. -java.lang.String#formatted(java.lang.Object[]) @ Uses default locale - use String#format(Locale, String, Object...) instead +@defaultMessage Uses default locale - use org.elasticsearch.common.Strings#format(String, Object...) instead +java.lang.String#formatted(java.lang.Object[]) +java.lang.String#format(java.lang.String,java.lang.Object[]) @defaultMessage Unbatched cluster state tasks are a source of performance and stability bugs. Implement the update logic in a executor which is reused across tasks instead. org.elasticsearch.cluster.service.MasterService#submitUnbatchedStateUpdateTask(java.lang.String, org.elasticsearch.cluster.ClusterStateUpdateTask) diff --git a/build-tools-internal/src/main/resources/templates/breaking-changes.asciidoc b/build-tools-internal/src/main/resources/templates/breaking-changes.asciidoc index a16c7e393ee9..d181c60f6463 100644 --- a/build-tools-internal/src/main/resources/templates/breaking-changes.asciidoc +++ b/build-tools-internal/src/main/resources/templates/breaking-changes.asciidoc @@ -7,7 +7,7 @@ This section discusses the changes that you need to be aware of when migrating your application to {es} ${majorDotMinor}. -See also <> and <>. +See also {ref-bare}/${majorDotMinor}/release-highlights.html[What's new in ${majorDotMinor}] and <>. 
<% if (isElasticsearchSnapshot) { %> coming::[${majorDotMinorDotRevision}] <% } %> diff --git a/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateMigrationFile.asciidoc b/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateMigrationFile.asciidoc index d8be43130260..1c835da9994a 100644 --- a/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateMigrationFile.asciidoc +++ b/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateMigrationFile.asciidoc @@ -7,7 +7,7 @@ This section discusses the changes that you need to be aware of when migrating your application to {es} 8.4. -See also <> and <>. +See also {ref-bare}/8.4/release-highlights.html[What's new in 8.4] and <>. coming::[8.4.0] diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index 32617c9623a9..0444d4152ce1 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -1,32 +1,30 @@ elasticsearch = 8.7.0 -lucene = 9.4.1 +lucene = 9.5.0 bundled_jdk_vendor = openjdk -bundled_jdk = 19.0.1+10@afdd2e245b014143b62ccb916125e3ce +bundled_jdk = 19.0.2+7@fdb695a9d9064ad6b064dc6df578380c # optional dependencies spatial4j = 0.7 jts = 1.15.0 -jackson = 2.14.0 +jackson = 2.14.2 snakeyaml = 1.33 icu4j = 68.2 supercsv = 2.4.0 log4j = 2.19.0 slf4j = 1.6.2 ecsLogging = 1.2.0 - jna = 5.10.0 +netty = 4.1.86.Final +commons_lang3 = 3.9 +google_oauth_client = 1.34.1 -netty = 4.1.84.Final - -commons_lang3 = 3.9 - +antlr4 = 4.11.1 # when updating this version, you need to ensure compatibility with: # - modules/ingest-attachment (transitive dependency, check the upstream POM) # - distribution/tools/plugin-cli # - x-pack/plugin/security bouncycastle=1.64 - # used by security and idp (need to be in sync due to cross-dependency in testing) opensaml = 4.0.1 @@ -36,6 +34,7 @@ httpcore = 4.4.13 httpasyncclient = 4.1.5 commonslogging = 1.2 commonscodec = 1.15 +protobuf = 3.21.9 # test dependencies randomizedrunner = 2.8.0 diff --git a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/plugin/StablePluginBuildPluginFuncTest.groovy b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/plugin/StablePluginBuildPluginFuncTest.groovy index fc706497008a..9b0a44ad9710 100644 --- a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/plugin/StablePluginBuildPluginFuncTest.groovy +++ b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/plugin/StablePluginBuildPluginFuncTest.groovy @@ -19,15 +19,11 @@ import java.nio.file.Files import java.nio.file.Path import java.util.stream.Collectors +import static org.elasticsearch.gradle.fixtures.TestClasspathUtils.setupNamedComponentScanner + class StablePluginBuildPluginFuncTest extends AbstractGradleFuncTest { def setup() { - // underlaying TestClusterPlugin and StandaloneRestIntegTestTask are not cc compatible - configurationCacheCompatible = false - } - - def "can build stable plugin properties"() { - given: buildFile << """plugins { id 'elasticsearch.stable-esplugin' } @@ -38,8 +34,27 @@ class StablePluginBuildPluginFuncTest extends AbstractGradleFuncTest { name = 'myplugin' description = 'test plugin' } + repositories { + maven { + name = "local-test" + url = file("local-repo") + metadataSources { + artifact() + } + } + } """ + // underlaying TestClusterPlugin 
and StandaloneRestIntegTestTask are not cc compatible + configurationCacheCompatible = false + + def version = VersionProperties.elasticsearch + setupNamedComponentScanner(dir("local-repo/org/elasticsearch/elasticsearch-plugin-scanner/${version}/"), version) + + } + + def "can build stable plugin properties"() { + given: when: def result = gradleRunner(":pluginProperties").build() def props = getPluginProperties() @@ -62,32 +77,22 @@ class StablePluginBuildPluginFuncTest extends AbstractGradleFuncTest { } def "can scan and create named components file"() { + //THIS IS RUNNING A MOCK CONFIGURED IN setup() given: File jarFolder = new File(testProjectDir.root, "jars") jarFolder.mkdirs() - buildFile << """plugins { - id 'elasticsearch.stable-esplugin' - } - - version = '1.2.3' - - esplugin { - name = 'myplugin' - description = 'test plugin' - } - + buildFile << """ dependencies { implementation files('${normalized(StableApiJarMocks.createPluginApiJar(jarFolder.toPath()).toAbsolutePath().toString())}') implementation files('${normalized(StableApiJarMocks.createExtensibleApiJar(jarFolder.toPath()).toAbsolutePath().toString())}') } - """ file("src/main/java/org/acme/A.java") << """ package org.acme; - import org.elasticsearch.plugin.api.NamedComponent; + import org.elasticsearch.plugin.NamedComponent; import org.elasticsearch.plugin.scanner.test_classes.ExtensibleClass; @NamedComponent( "componentA") @@ -95,18 +100,16 @@ class StablePluginBuildPluginFuncTest extends AbstractGradleFuncTest { } """ - when: - def result = gradleRunner(":assemble").build() - Path namedComponents = file("build/generated-named-components/named_components.json").toPath(); - def map = new JsonSlurper().parse(namedComponents.toFile()) + def result = gradleRunner(":assemble", "-i").build() + then: result.task(":assemble").outcome == TaskOutcome.SUCCESS - - map == ["org.elasticsearch.plugin.scanner.test_classes.ExtensibleClass" : (["componentA" : "org.acme.A"]) ] + //we expect that a Fake namedcomponent scanner used in this test will be passed a filename to be created + File namedComponents = file("build/generated-named-components/named_components.json") + namedComponents.exists() == true } - Map getPluginProperties() { Path propsFile = file("build/generated-descriptor/stable-plugin-descriptor.properties").toPath(); Properties rawProps = new Properties() diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/NamedComponentScannerMock.java b/build-tools/src/main/java/org/elasticsearch/gradle/NamedComponentScannerMock.java new file mode 100644 index 000000000000..cfd3ab566b33 --- /dev/null +++ b/build-tools/src/main/java/org/elasticsearch/gradle/NamedComponentScannerMock.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; + +public class NamedComponentScannerMock { + public static void main(String[] args) throws IOException { + // expect a file name to passed in as a parameter + // creating a file so that we can assert about this in a test + Path path = Path.of(args[0]); + Files.createDirectories(path.getParent()); + Files.createFile(path); + } +} diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/GenerateNamedComponentsTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/plugin/GenerateNamedComponentsTask.java index 9a44a5214899..7945bce426cc 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/GenerateNamedComponentsTask.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/plugin/GenerateNamedComponentsTask.java @@ -8,55 +8,60 @@ package org.elasticsearch.gradle.plugin; -import com.fasterxml.jackson.databind.ObjectMapper; - -import org.elasticsearch.gradle.plugin.scanner.ClassReaders; -import org.elasticsearch.gradle.plugin.scanner.NamedComponentScanner; +import org.elasticsearch.gradle.LoggedExec; import org.gradle.api.DefaultTask; -import org.gradle.api.file.ConfigurableFileCollection; import org.gradle.api.file.FileCollection; import org.gradle.api.file.ProjectLayout; import org.gradle.api.file.RegularFileProperty; import org.gradle.api.logging.Logger; import org.gradle.api.logging.Logging; -import org.gradle.api.model.ObjectFactory; import org.gradle.api.tasks.CompileClasspath; +import org.gradle.api.tasks.InputFiles; import org.gradle.api.tasks.OutputFile; +import org.gradle.api.tasks.PathSensitive; +import org.gradle.api.tasks.PathSensitivity; import org.gradle.api.tasks.TaskAction; -import org.gradle.workers.WorkAction; -import org.gradle.workers.WorkParameters; +import org.gradle.process.ExecOperations; +import org.gradle.process.ExecResult; import org.gradle.workers.WorkerExecutor; -import org.objectweb.asm.ClassReader; import java.io.File; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; import javax.inject.Inject; public abstract class GenerateNamedComponentsTask extends DefaultTask { private static final Logger LOGGER = Logging.getLogger(GenerateNamedComponentsTask.class); + private static final String NAMED_COMPONENTS_DIR = "generated-named-components/"; private static final String NAMED_COMPONENTS_FILE = "named_components.json"; + private static final String NAMED_COMPONENTS_PATH = NAMED_COMPONENTS_DIR + NAMED_COMPONENTS_FILE; private final WorkerExecutor workerExecutor; + private FileCollection pluginScannerClasspath; private FileCollection classpath; + private ExecOperations execOperations; + private ProjectLayout projectLayout; @Inject - public GenerateNamedComponentsTask(WorkerExecutor workerExecutor, ObjectFactory objectFactory, ProjectLayout projectLayout) { + public GenerateNamedComponentsTask(WorkerExecutor workerExecutor, ExecOperations execOperations, ProjectLayout projectLayout) { this.workerExecutor = workerExecutor; - getOutputFile().convention(projectLayout.getBuildDirectory().file("generated-named-components/" + NAMED_COMPONENTS_FILE)); + this.execOperations = execOperations; + this.projectLayout = projectLayout; + + getOutputFile().convention(projectLayout.getBuildDirectory().file(NAMED_COMPONENTS_PATH)); } @TaskAction public void scanPluginClasses() { - 
workerExecutor.noIsolation().submit(GenerateNamedComponentsAction.class, params -> { - params.getClasspath().from(classpath); - params.getOutputFile().set(getOutputFile()); + File outputFile = projectLayout.getBuildDirectory().file(NAMED_COMPONENTS_PATH).get().getAsFile(); + + ExecResult execResult = LoggedExec.javaexec(execOperations, spec -> { + spec.classpath(pluginScannerClasspath.plus(getClasspath()).getAsPath()); + spec.getMainClass().set("org.elasticsearch.plugin.scanner.NamedComponentScanner"); + spec.args(outputFile); + spec.setErrorOutput(System.err); + spec.setStandardOutput(System.out); }); + execResult.assertNormalExitValue(); } @OutputFile @@ -71,37 +76,13 @@ public void setClasspath(FileCollection classpath) { this.classpath = classpath; } - public abstract static class GenerateNamedComponentsAction implements WorkAction { - private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); - - @Override - public void execute() { - Set classpathFiles = getParameters().getClasspath().getFiles(); - - List classReaders = ClassReaders.ofPaths(classpathFiles.stream().map(File::toPath)).collect(Collectors.toList()); - - NamedComponentScanner namedComponentScanner = new NamedComponentScanner(); - Map> namedComponentsMap = namedComponentScanner.scanForNamedClasses(classReaders); - writeToFile(namedComponentsMap); - } - - private void writeToFile(Map> namedComponentsMap) { - try { - String json = OBJECT_MAPPER.writeValueAsString(namedComponentsMap); - File file = getParameters().getOutputFile().getAsFile().get(); - Path of = Path.of(file.getAbsolutePath()); - Files.writeString(of, json); - } catch (Exception e) { - e.printStackTrace(); - } - - } + public void setPluginScannerClasspath(FileCollection pluginScannerClasspath) { + this.pluginScannerClasspath = pluginScannerClasspath; } - interface Parameters extends WorkParameters { - - ConfigurableFileCollection getClasspath(); - - RegularFileProperty getOutputFile(); + @InputFiles + @PathSensitive(PathSensitivity.RELATIVE) + public FileCollection getPluginScannerClasspath() { + return pluginScannerClasspath; } } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/StablePluginBuildPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/plugin/StablePluginBuildPlugin.java index 80f65ea26fa8..ef2d1631d560 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/StablePluginBuildPlugin.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/plugin/StablePluginBuildPlugin.java @@ -8,9 +8,12 @@ package org.elasticsearch.gradle.plugin; +import org.elasticsearch.gradle.VersionProperties; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.Plugin; import org.gradle.api.Project; +import org.gradle.api.artifacts.Configuration; +import org.gradle.api.artifacts.dsl.DependencyHandler; import org.gradle.api.file.FileCollection; import org.gradle.api.file.RegularFile; import org.gradle.api.plugins.JavaPlugin; @@ -33,12 +36,21 @@ public void apply(Project project) { }); final var pluginNamedComponents = project.getTasks().register("pluginNamedComponents", GenerateNamedComponentsTask.class, t -> { + SourceSet mainSourceSet = GradleUtils.getJavaSourceSets(project).findByName(SourceSet.MAIN_SOURCE_SET_NAME); FileCollection dependencyJars = project.getConfigurations().getByName(JavaPlugin.COMPILE_CLASSPATH_CONFIGURATION_NAME); FileCollection compiledPluginClasses = mainSourceSet.getOutput().getClassesDirs(); FileCollection classPath = dependencyJars.plus(compiledPluginClasses); 
t.setClasspath(classPath); }); + Configuration pluginScannerConfig = project.getConfigurations().create("pluginScannerConfig"); + DependencyHandler dependencyHandler = project.getDependencies(); + pluginScannerConfig.defaultDependencies( + deps -> deps.add( + dependencyHandler.create("org.elasticsearch:elasticsearch-plugin-scanner:" + VersionProperties.getElasticsearch()) + ) + ); + pluginNamedComponents.configure(t -> { t.setPluginScannerClasspath(pluginScannerConfig); }); final var pluginExtension = project.getExtensions().getByType(PluginPropertiesExtension.class); pluginExtension.getBundleSpec().from(pluginNamedComponents); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/scanner/ClassReaders.java b/build-tools/src/main/java/org/elasticsearch/gradle/plugin/scanner/ClassReaders.java deleted file mode 100644 index 40cc66c0a485..000000000000 --- a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/scanner/ClassReaders.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.gradle.plugin.scanner; - -import org.objectweb.asm.ClassReader; - -import java.io.IOException; -import java.io.InputStream; -import java.io.UncheckedIOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.jar.JarFile; -import java.util.stream.Stream; -import java.util.zip.ZipFile; - -/** - * A utility class containing methods to create streams of ASM's ClassReader - * - * @see ClassReader - */ -public class ClassReaders { - private static final String MODULE_INFO = "module-info.class"; - - /** - * This method must be used within a try-with-resources statement or similar - * control structure. - */ - public static Stream ofDirWithJars(String path) { - if (path == null) { - return Stream.empty(); - } - Path dir = Paths.get(path); - try { - return ofPaths(Files.list(dir)); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - - /** - * This method must be used within a try-with-resources statement or similar - * control structure. 
- */ - public static Stream ofPaths(Stream list) { - return list.filter(Files::exists).flatMap(p -> { - if (p.toString().endsWith(".jar")) { - return classesInJar(p); - } else { - return classesInPath(p); - } - }); - } - - private static Stream classesInJar(Path jar) { - try { - JarFile jf = new JarFile(jar.toFile(), true, ZipFile.OPEN_READ, Runtime.version()); - - Stream classReaderStream = jf.versionedStream() - .filter(e -> e.getName().endsWith(".class") && e.getName().equals(MODULE_INFO) == false) - .map(e -> { - try (InputStream is = jf.getInputStream(e)) { - byte[] classBytes = is.readAllBytes(); - return new ClassReader(classBytes); - } catch (IOException ex) { - throw new UncheckedIOException(ex); - } - }); - return classReaderStream; - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - - private static Stream classesInPath(Path root) { - try { - Stream stream = Files.walk(root); - return stream.filter(p -> p.toString().endsWith(".class")) - .filter(p -> p.toString().endsWith("module-info.class") == false) - .map(p -> { - try (InputStream is = Files.newInputStream(p)) { - byte[] classBytes = is.readAllBytes(); - return new ClassReader(classBytes); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - }); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } -} diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/scanner/NamedComponentScanner.java b/build-tools/src/main/java/org/elasticsearch/gradle/plugin/scanner/NamedComponentScanner.java deleted file mode 100644 index 86843c8f20ee..000000000000 --- a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/scanner/NamedComponentScanner.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.gradle.plugin.scanner; - -import org.objectweb.asm.AnnotationVisitor; -import org.objectweb.asm.ClassReader; -import org.objectweb.asm.Opcodes; - -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; - -public class NamedComponentScanner { - - // returns a Map - extensible interface -> map{ namedName -> className } - public Map> scanForNamedClasses(Collection classReaderStream) { - // TODO I don't have access to stable-plugin-api here so I have to hardcode class descriptors - ClassScanner extensibleClassScanner = new ClassScanner("Lorg/elasticsearch/plugin/api/Extensible;", (classname, map) -> { - map.put(classname, classname); - return null; - }); - extensibleClassScanner.visit(classReaderStream.stream()); - - ClassScanner namedComponentsScanner = new ClassScanner( - "Lorg/elasticsearch/plugin/api/NamedComponent;"/*NamedComponent.class*/, - (classname, map) -> new AnnotationVisitor(Opcodes.ASM9) { - @Override - public void visit(String key, Object value) { - assert key.equals("value"); - assert value instanceof String; - map.put(value.toString(), classname); - } - } - ); - - namedComponentsScanner.visit(classReaderStream.stream()); - - Map> componentInfo = new HashMap<>(); - for (var e : namedComponentsScanner.getFoundClasses().entrySet()) { - String name = e.getKey(); - String classnameWithSlashes = e.getValue(); - String extensibleClassnameWithSlashes = extensibleClassScanner.getFoundClasses().get(classnameWithSlashes); - if (extensibleClassnameWithSlashes == null) { - throw new RuntimeException( - "Named component " + name + "(" + pathToClassName(classnameWithSlashes) + ") does not extend from an extensible class" - ); - } - var named = componentInfo.computeIfAbsent(pathToClassName(extensibleClassnameWithSlashes), k -> new HashMap<>()); - named.put(name, pathToClassName(classnameWithSlashes)); - } - return componentInfo; - } - - private String pathToClassName(String classWithSlashes) { - return classWithSlashes.replace('/', '.'); - } -} diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java index 0317b74bf0d3..f6705bdb62fa 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java @@ -491,6 +491,13 @@ public List getAllReadinessPortURI() { return nodes.stream().flatMap(each -> each.getAllReadinessPortURI().stream()).collect(Collectors.toList()); } + @Override + @Internal + public List getAllRemoteAccessPortURI() { + waitForAllConditions(); + return nodes.stream().flatMap(each -> each.getAllRemoteAccessPortURI().stream()).collect(Collectors.toList()); + } + public void waitForAllConditions() { writeUnicastHostsFiles(); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index 85433e5a0772..fcb1af73e854 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -164,6 +164,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { private final Path transportPortFile; private final Path httpPortsFile; private final Path readinessPortsFile; + private final Path 
remoteAccessPortsFile; private final Path esOutputFile; private final Path esInputFile; private final Path tmpDir; @@ -215,6 +216,7 @@ public class ElasticsearchNode implements TestClusterConfiguration { transportPortFile = confPathLogs.resolve("transport.ports"); httpPortsFile = confPathLogs.resolve("http.ports"); readinessPortsFile = confPathLogs.resolve("readiness.ports"); + remoteAccessPortsFile = confPathLogs.resolve("remote_cluster.ports"); esOutputFile = confPathLogs.resolve("es.out"); esInputFile = workingDir.resolve("es.in"); tmpDir = workingDir.resolve("tmp"); @@ -999,6 +1001,13 @@ public List getAllReadinessPortURI() { return getReadinessPortInternal(); } + @Override + @Internal + public List getAllRemoteAccessPortURI() { + waitForAllConditions(); + return getRemoteAccessPortInternal(); + } + @Internal public File getServerLog() { return confPathLogs.resolve(defaultConfig.get("cluster.name") + "_server.json").toFile(); @@ -1022,6 +1031,9 @@ public synchronized void stop(boolean tailLogs) { if (Files.exists(readinessPortsFile)) { Files.delete(readinessPortsFile); } + if (Files.exists(remoteAccessPortsFile)) { + Files.delete(remoteAccessPortsFile); + } } catch (IOException e) { throw new UncheckedIOException(e); } @@ -1047,6 +1059,9 @@ public synchronized void stop(boolean tailLogs) { if (Files.exists(readinessPortsFile)) { Files.delete(readinessPortsFile); } + if (Files.exists(remoteAccessPortsFile)) { + Files.delete(remoteAccessPortsFile); + } } catch (IOException e) { throw new UncheckedIOException(e); } @@ -1519,6 +1534,14 @@ private List getReadinessPortInternal() { } } + private List getRemoteAccessPortInternal() { + try { + return readPortsFile(remoteAccessPortsFile); + } catch (IOException e) { + return new ArrayList<>(); + } + } + private List readPortsFile(Path file) throws IOException { try (Stream lines = Files.lines(file, StandardCharsets.UTF_8)) { return lines.map(String::trim).collect(Collectors.toList()); @@ -1673,20 +1696,17 @@ public boolean isHttpSslEnabled() { } void configureHttpWait(WaitForHttpResource wait) { - if (settings.containsKey("xpack.security.http.ssl.certificate_authorities")) { - wait.setCertificateAuthorities( - getConfigDir().resolve(settings.get("xpack.security.http.ssl.certificate_authorities").toString()).toFile() - ); - } if (settings.containsKey("xpack.security.http.ssl.certificate")) { - wait.setCertificateAuthorities(getConfigDir().resolve(settings.get("xpack.security.http.ssl.certificate").toString()).toFile()); - } - if (settings.containsKey("xpack.security.http.ssl.keystore.path") - && settings.containsKey("xpack.security.http.ssl.certificate_authorities") == false) { // Can not set both trust stores and CA - wait.setTrustStoreFile(getConfigDir().resolve(settings.get("xpack.security.http.ssl.keystore.path").toString()).toFile()); - } - if (keystoreSettings.containsKey("xpack.security.http.ssl.keystore.secure_password")) { - wait.setTrustStorePassword(keystoreSettings.get("xpack.security.http.ssl.keystore.secure_password").toString()); + wait.setServerCertificate(getConfigDir().resolve(settings.get("xpack.security.http.ssl.certificate").toString()).toFile()); + } else { + if (settings.containsKey("xpack.security.http.ssl.keystore.path")) { + wait.setServerKeystoreFile( + getConfigDir().resolve(settings.get("xpack.security.http.ssl.keystore.path").toString()).toFile() + ); + } + if (keystoreSettings.containsKey("xpack.security.http.ssl.keystore.secure_password")) { + 
wait.setServerKeystorePassword(keystoreSettings.get("xpack.security.http.ssl.keystore.secure_password").toString()); + } } } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/SslTrustResolver.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/SslTrustResolver.java new file mode 100644 index 000000000000..4c2cbf4defb7 --- /dev/null +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/SslTrustResolver.java @@ -0,0 +1,219 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.testclusters; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.security.GeneralSecurityException; +import java.security.KeyStore; +import java.security.KeyStoreException; +import java.security.SecureRandom; +import java.security.cert.Certificate; +import java.security.cert.CertificateException; +import java.security.cert.CertificateFactory; +import java.security.cert.X509Certificate; +import java.util.Arrays; +import java.util.Collection; +import java.util.Enumeration; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import javax.net.ssl.KeyManager; +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManager; +import javax.net.ssl.TrustManagerFactory; +import javax.net.ssl.X509TrustManager; + +class SslTrustResolver { + private Set certificateAuthorities; + private File trustStoreFile; + private String trustStorePassword; + private File serverCertificate; + private File serverKeyStoreFile; + private String serverKeyStorePassword; + + public void setCertificateAuthorities(File... 
certificateAuthorities) { + this.certificateAuthorities = new HashSet<>(Arrays.asList(certificateAuthorities)); + } + + public void setTrustStoreFile(File trustStoreFile) { + this.trustStoreFile = trustStoreFile; + } + + public void setTrustStorePassword(String trustStorePassword) { + this.trustStorePassword = trustStorePassword; + } + + public void setServerCertificate(File serverCertificate) { + this.serverCertificate = serverCertificate; + } + + public void setServerKeystoreFile(File keyStoreFile) { + this.serverKeyStoreFile = keyStoreFile; + } + + public void setServerKeystorePassword(String keyStorePassword) { + this.serverKeyStorePassword = keyStorePassword; + } + + public SSLContext getSslContext() throws GeneralSecurityException, IOException { + final TrustManager[] trustManagers = buildTrustManagers(); + if (trustManagers != null) { + return createSslContext(trustManagers); + } else { + return null; + } + } + + TrustManager[] buildTrustManagers() throws GeneralSecurityException, IOException { + var configurationCount = Stream.of( + this.certificateAuthorities, + this.trustStoreFile, + this.serverCertificate, + this.serverKeyStoreFile + ).filter(Objects::nonNull).count(); + if (configurationCount == 0) { + return null; + } else if (configurationCount > 1) { + throw new IllegalStateException( + String.format( + Locale.ROOT, + "Cannot specify more than one trust method (CA=%s, trustStore=%s, serverCert=%s, serverKeyStore=%s)", + certificateAuthorities, + trustStoreFile, + serverCertificate, + serverKeyStoreFile + ) + ); + } + if (this.certificateAuthorities != null) { + return getTrustManagers(buildTrustStoreFromCA(certificateAuthorities)); + } else if (this.trustStoreFile != null) { + return getTrustManagers(readKeyStoreFromFile(trustStoreFile, trustStorePassword)); + } else if (this.serverCertificate != null) { + return buildTrustManagerFromLeafCertificates(head(readCertificates(serverCertificate))); + } else if (this.serverKeyStoreFile != null) { + return buildTrustManagerFromLeafCertificates(readCertificatesFromKeystore(serverKeyStoreFile, serverKeyStorePassword)); + } else { + // Cannot get here unless the code gets out of sync with the 'configurationCount == 0' check above + throw new IllegalStateException("Expected to configure trust, but all configuration values are null"); + } + } + + private SSLContext createSslContext(TrustManager[] trustManagers) throws GeneralSecurityException { + SSLContext sslContext = SSLContext.getInstance("TLSv1.2"); + sslContext.init(new KeyManager[0], trustManagers, new SecureRandom()); + return sslContext; + } + + private TrustManager[] getTrustManagers(KeyStore trustStore) throws GeneralSecurityException { + checkForTrustEntry(trustStore); + TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(trustStore); + return tmf.getTrustManagers(); + } + + private void checkForTrustEntry(KeyStore trustStore) throws KeyStoreException { + Enumeration enumeration = trustStore.aliases(); + while (enumeration.hasMoreElements()) { + if (trustStore.isCertificateEntry(enumeration.nextElement())) { + // found trusted cert entry + return; + } + } + throw new IllegalStateException("Trust-store does not contain any trusted certificate entries"); + } + + private static KeyStore buildTrustStoreFromCA(Set files) throws GeneralSecurityException, IOException { + final KeyStore store = KeyStore.getInstance(KeyStore.getDefaultType()); + store.load(null, null); + int counter = 0; + for (File ca : files) { + for 
(Certificate certificate : readCertificates(ca)) { + store.setCertificateEntry("cert-" + counter, certificate); + counter++; + } + } + return store; + } + + private static TrustManager[] buildTrustManagerFromLeafCertificates(Collection certificates) { + final Set trusted = certificates.stream() + .filter(X509Certificate.class::isInstance) + .map(X509Certificate.class::cast) + .collect(Collectors.toUnmodifiableSet()); + + var trustManager = new X509TrustManager() { + @Override + public void checkServerTrusted(X509Certificate[] chain, String authType) throws CertificateException { + final X509Certificate leaf = chain[0]; + if (trusted.contains(leaf) == false) { + throw new CertificateException("Untrusted leaf certificate: " + leaf.getSubjectX500Principal()); + } + } + + @Override + public X509Certificate[] getAcceptedIssuers() { + // This doesn't apply when trusting leaf certs, and is only really needed for server trust managers anyways + return new X509Certificate[0]; + } + + @Override + public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException { + throw new CertificateException("This trust manager is for client use only and cannot trust other clients"); + } + + }; + return new TrustManager[] { trustManager }; + } + + private static Collection readCertificatesFromKeystore(File file, String password) throws GeneralSecurityException, + IOException { + var keyStore = readKeyStoreFromFile(file, password); + final Set certificates = new HashSet<>(keyStore.size()); + var enumeration = keyStore.aliases(); + while (enumeration.hasMoreElements()) { + var alias = enumeration.nextElement(); + if (keyStore.isKeyEntry(alias)) { + certificates.add(keyStore.getCertificate(alias)); + } + } + return certificates; + } + + private static KeyStore readKeyStoreFromFile(File file, String password) throws GeneralSecurityException, IOException { + KeyStore keyStore = KeyStore.getInstance(file.getName().endsWith(".jks") ? "JKS" : "PKCS12"); + try (InputStream input = new FileInputStream(file)) { + keyStore.load(input, password == null ? 
null : password.toCharArray()); + } + return keyStore; + } + + private static Collection readCertificates(File pemFile) throws GeneralSecurityException, IOException { + final CertificateFactory certFactory = CertificateFactory.getInstance("X.509"); + try (InputStream input = new FileInputStream(pemFile)) { + return certFactory.generateCertificates(input); + } + } + + private Collection head(Collection certificates) { + if (certificates.isEmpty()) { + return certificates; + } else { + return List.of(certificates.iterator().next()); + } + } +} diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java index 11ad0a29f5b8..3754f57dc378 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java @@ -9,8 +9,11 @@ import org.elasticsearch.gradle.FileSystemOperationsAware; import org.elasticsearch.gradle.util.GradleUtils; +import org.gradle.api.Task; import org.gradle.api.provider.Provider; import org.gradle.api.services.internal.BuildServiceRegistryInternal; +import org.gradle.api.specs.NotSpec; +import org.gradle.api.specs.Spec; import org.gradle.api.tasks.CacheableTask; import org.gradle.api.tasks.Internal; import org.gradle.api.tasks.Nested; @@ -40,6 +43,11 @@ public class StandaloneRestIntegTestTask extends Test implements TestClustersAwa private boolean debugServer = false; public StandaloneRestIntegTestTask() { + Spec taskSpec = t -> getProject().getTasks() + .withType(StandaloneRestIntegTestTask.class) + .stream() + .filter(task -> task != this) + .anyMatch(task -> Collections.disjoint(task.getClusters(), getClusters()) == false); this.getOutputs() .doNotCacheIf( "Caching disabled for this task since it uses a cluster shared by other tasks", @@ -49,13 +57,9 @@ public StandaloneRestIntegTestTask() { * avoid any undesired behavior we simply disable the cache if we detect that this task uses a cluster shared between * multiple tasks. 
*/ - t -> getProject().getTasks() - .withType(StandaloneRestIntegTestTask.class) - .stream() - .filter(task -> task != this) - .anyMatch(task -> Collections.disjoint(task.getClusters(), getClusters()) == false) + taskSpec ); - + this.getOutputs().upToDateWhen(new NotSpec(taskSpec)); this.getOutputs() .doNotCacheIf( "Caching disabled for this task since it is configured to preserve data directory", @@ -67,11 +71,7 @@ public StandaloneRestIntegTestTask() { @Option(option = "debug-server-jvm", description = "Enable debugging configuration, to allow attaching a debugger to elasticsearch.") public void setDebugServer(boolean enabled) { this.debugServer = enabled; - } - - @Override - public int getMaxParallelForks() { - return 1; + systemProperty("tests.cluster.debug.enabled", Boolean.toString(enabled)); } @Nested diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java index e6f0fd9965b6..b6ead59296eb 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java @@ -121,6 +121,8 @@ public interface TestClusterConfiguration { List getAllReadinessPortURI(); + List getAllRemoteAccessPortURI(); + void stop(boolean tailLogs); void setNameCustomization(Function nameSupplier); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/WaitForHttpResource.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/WaitForHttpResource.java index 03368e74cdb7..550dd0fdcf8f 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/WaitForHttpResource.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/WaitForHttpResource.java @@ -12,31 +12,20 @@ import org.gradle.api.logging.Logging; import java.io.File; -import java.io.FileInputStream; import java.io.IOException; -import java.io.InputStream; import java.net.HttpURLConnection; import java.net.MalformedURLException; import java.net.URL; import java.nio.charset.StandardCharsets; import java.security.GeneralSecurityException; -import java.security.KeyStore; -import java.security.KeyStoreException; -import java.security.SecureRandom; -import java.security.cert.Certificate; -import java.security.cert.CertificateFactory; -import java.util.Arrays; import java.util.Base64; import java.util.Collections; -import java.util.Enumeration; import java.util.HashSet; import java.util.Set; import java.util.concurrent.TimeUnit; import javax.net.ssl.HttpsURLConnection; -import javax.net.ssl.KeyManager; import javax.net.ssl.SSLContext; -import javax.net.ssl.TrustManagerFactory; /** * A utility to wait for a specific HTTP resource to be available, optionally with customized TLS trusted CAs. 
@@ -47,11 +36,10 @@ public class WaitForHttpResource { private static final Logger logger = Logging.getLogger(WaitForHttpResource.class); + private final SslTrustResolver trustResolver; + private final URL url; + private Set validResponseCodes = Collections.singleton(200); - private URL url; - private Set certificateAuthorities; - private File trustStoreFile; - private String trustStorePassword; private String username; private String password; @@ -61,6 +49,7 @@ public WaitForHttpResource(String protocol, String host, int numberOfNodes) thro public WaitForHttpResource(URL url) { this.url = url; + this.trustResolver = new SslTrustResolver(); } public void setValidResponseCodes(int... validResponseCodes) { @@ -71,15 +60,27 @@ public void setValidResponseCodes(int... validResponseCodes) { } public void setCertificateAuthorities(File... certificateAuthorities) { - this.certificateAuthorities = new HashSet<>(Arrays.asList(certificateAuthorities)); + trustResolver.setCertificateAuthorities(certificateAuthorities); } public void setTrustStoreFile(File trustStoreFile) { - this.trustStoreFile = trustStoreFile; + trustResolver.setTrustStoreFile(trustStoreFile); } public void setTrustStorePassword(String trustStorePassword) { - this.trustStorePassword = trustStorePassword; + trustResolver.setTrustStorePassword(trustStorePassword); + } + + public void setServerCertificate(File serverCertificate) { + trustResolver.setServerCertificate(serverCertificate); + } + + public void setServerKeystoreFile(File keyStoreFile) { + trustResolver.setServerKeystoreFile(keyStoreFile); + } + + public void setServerKeystorePassword(String keyStorePassword) { + trustResolver.setServerKeystorePassword(keyStorePassword); } public void setUsername(String username) { @@ -94,13 +95,7 @@ public boolean wait(int durationInMs) throws GeneralSecurityException, Interrupt final long waitUntil = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(durationInMs); final long sleep = Long.max(durationInMs / 10, 100); - final SSLContext ssl; - final KeyStore trustStore = buildTrustStore(); - if (trustStore != null) { - ssl = createSslContext(trustStore); - } else { - ssl = null; - } + final SSLContext ssl = trustResolver.getSslContext(); IOException failure = null; while (true) { try { @@ -159,61 +154,4 @@ private void configureBasicAuth(HttpURLConnection connection) { ); } } - - KeyStore buildTrustStore() throws GeneralSecurityException, IOException { - if (this.certificateAuthorities != null) { - if (trustStoreFile != null) { - throw new IllegalStateException("Cannot specify both truststore and CAs"); - } - return buildTrustStoreFromCA(); - } else if (trustStoreFile != null) { - return buildTrustStoreFromFile(); - } else { - return null; - } - } - - private KeyStore buildTrustStoreFromFile() throws GeneralSecurityException, IOException { - KeyStore keyStore = KeyStore.getInstance(trustStoreFile.getName().endsWith(".jks") ? "JKS" : "PKCS12"); - try (InputStream input = new FileInputStream(trustStoreFile)) { - keyStore.load(input, trustStorePassword == null ? 
null : trustStorePassword.toCharArray()); - } - return keyStore; - } - - private KeyStore buildTrustStoreFromCA() throws GeneralSecurityException, IOException { - final KeyStore store = KeyStore.getInstance(KeyStore.getDefaultType()); - store.load(null, null); - final CertificateFactory certFactory = CertificateFactory.getInstance("X.509"); - int counter = 0; - for (File ca : certificateAuthorities) { - try (InputStream input = new FileInputStream(ca)) { - for (Certificate certificate : certFactory.generateCertificates(input)) { - store.setCertificateEntry("cert-" + counter, certificate); - counter++; - } - } - } - return store; - } - - private SSLContext createSslContext(KeyStore trustStore) throws GeneralSecurityException { - checkForTrustEntry(trustStore); - TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); - tmf.init(trustStore); - SSLContext sslContext = SSLContext.getInstance("TLSv1.2"); - sslContext.init(new KeyManager[0], tmf.getTrustManagers(), new SecureRandom()); - return sslContext; - } - - private void checkForTrustEntry(KeyStore trustStore) throws KeyStoreException { - Enumeration enumeration = trustStore.aliases(); - while (enumeration.hasMoreElements()) { - if (trustStore.isCertificateEntry(enumeration.nextElement())) { - // found trusted cert entry - return; - } - } - throw new IllegalStateException("Trust-store does not contain any trusted certificate entries"); - } } diff --git a/build-tools/src/test/groovy/org/elasticsearch/gradle/plugin/scanner/AnnotatedHierarchyVisitorSpec.groovy b/build-tools/src/test/groovy/org/elasticsearch/gradle/plugin/scanner/AnnotatedHierarchyVisitorSpec.groovy deleted file mode 100644 index 732fa1c5e0ac..000000000000 --- a/build-tools/src/test/groovy/org/elasticsearch/gradle/plugin/scanner/AnnotatedHierarchyVisitorSpec.groovy +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1 you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.gradle.plugin.scanner - -import spock.lang.Specification - -import org.elasticsearch.plugin.api.NamedComponent -import org.elasticsearch.plugin.scanner.test_classes.ExtensibleClass -import org.elasticsearch.plugin.scanner.test_classes.ExtensibleInterface -import org.elasticsearch.plugin.scanner.test_classes.ImplementingExtensible -import org.elasticsearch.plugin.scanner.test_classes.SubClass -import org.elasticsearch.plugin.api.Extensible -import org.objectweb.asm.ClassReader -import org.objectweb.asm.Type - -import java.nio.file.Files -import java.nio.file.Path -import java.nio.file.Paths - -class AnnotatedHierarchyVisitorSpec extends Specification { - Set foundClasses - AnnotatedHierarchyVisitor visitor - - def setup() { - foundClasses = new HashSet<>() - visitor = - new AnnotatedHierarchyVisitor( - Type.getDescriptor(Extensible.class), (className) -> { - foundClasses.add(className) - return null - } - ) - } - - def "empty result when no classes annotated"() { - when: - performScan(visitor, NamedComponent.class) - - then: - foundClasses.empty - } - - def "single class found when only one is annotated"() { - when: - performScan(visitor, ExtensibleClass.class) - - then: - foundClasses == [classNameToPath(ExtensibleClass.class)] as Set - } - - def "class extending an extensible is also found"() { - when: - performScan(visitor, ExtensibleClass.class, SubClass.class) - - then: - foundClasses == [classNameToPath(ExtensibleClass.class)] as Set - visitor.getClassHierarchy() == [(classNameToPath(ExtensibleClass.class)) : [classNameToPath(SubClass.class)] as Set] - } - - def "interface extending an extensible is also found"() { - when: - performScan(visitor, ImplementingExtensible.class, ExtensibleInterface.class) - - then: - foundClasses == [classNameToPath(ExtensibleInterface.class)] as Set - visitor.getClassHierarchy() == - [(classNameToPath(ExtensibleInterface.class)) : [classNameToPath(ImplementingExtensible.class)] as Set] - } - - private String classNameToPath(Class clazz) { - return clazz.getCanonicalName().replace(".", "/") - } - - private void performScan(AnnotatedHierarchyVisitor classVisitor, Class... classes) throws IOException, URISyntaxException { - for (Class clazz : classes) { - String className = classNameToPath(clazz) + ".class" - def stream = this.getClass().getClassLoader().getResourceAsStream(className) - try (InputStream fileInputStream = stream) { - ClassReader cr = new ClassReader(fileInputStream) - cr.accept(classVisitor, 0) - } - } - } - -} diff --git a/build-tools/src/test/groovy/org/elasticsearch/gradle/plugin/scanner/ClassReadersSpec.groovy b/build-tools/src/test/groovy/org/elasticsearch/gradle/plugin/scanner/ClassReadersSpec.groovy deleted file mode 100644 index bc448efd2d75..000000000000 --- a/build-tools/src/test/groovy/org/elasticsearch/gradle/plugin/scanner/ClassReadersSpec.groovy +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.gradle.plugin.scanner - -import spock.lang.Specification - -import org.elasticsearch.gradle.internal.test.InMemoryJavaCompiler; - -import org.elasticsearch.gradle.internal.test.JarUtils -import org.hamcrest.Matchers -import org.junit.Rule -import org.junit.rules.TemporaryFolder -import org.objectweb.asm.ClassReader - -import java.nio.file.Files -import java.nio.file.Path -import java.util.stream.Collectors -import java.util.stream.Stream - -import static org.hamcrest.MatcherAssert.assertThat - -class ClassReadersSpec extends Specification { - @Rule - TemporaryFolder testProjectDir = new TemporaryFolder() - - private Path tmpDir() throws IOException { - return testProjectDir.root.toPath(); - } - - def "module-info is not returned as a class from jar"() { - when: - final Path tmp = tmpDir(); - final Path dirWithJar = tmp.resolve("jars-dir"); - Files.createDirectories(dirWithJar); - Path jar = dirWithJar.resolve("api.jar"); - JarUtils.createJarWithEntries( - jar, Map.of( - "module-info.class", InMemoryJavaCompiler.compile( - "module-info", """ - module p {} - """) - ) - ) - - - then: - try (Stream classReaderStream = ClassReaders.ofPaths(Stream.of(jar))) { - - assertThat(classReaderStream.collect(Collectors.toList()), Matchers.empty()); - } - } - - - def "two classes are returned in a stream from jar"() { - when: - final Path tmp = tmpDir(); - final Path dirWithJar = tmp.resolve("jars-dir"); - Files.createDirectories(dirWithJar); - Path jar = dirWithJar.resolve("api.jar"); - JarUtils.createJarWithEntries( - jar, Map.of( - "p/A.class", InMemoryJavaCompiler.compile( - "p.A", """ - package p; - public class A {} - """), - "p/B.class", InMemoryJavaCompiler.compile( - "p.B", """ - package p; - public class B {} - """) - ) - ); - - - then: - try (Stream classReaderStream = ClassReaders.ofPaths(Stream.of(jar))) { - List collect = classReaderStream.map(cr -> cr.getClassName()).collect(Collectors.toList()); - assertThat(collect, Matchers.containsInAnyOrder("p/A", "p/B")); - } - } - - - def "on a classpath jars and individual classes are returned"() { - when: - final Path tmp = tmpDir(); - final Path dirWithJar = tmp.resolve("jars-dir"); - Files.createDirectories(dirWithJar); - - Path jar = dirWithJar.resolve("a_b.jar"); - JarUtils.createJarWithEntries( - jar, Map.of( - "p/A.class", InMemoryJavaCompiler.compile( - "p.A", """ - package p; - public class A {} - """), - "p/B.class", InMemoryJavaCompiler.compile( - "p.B", """ - package p; - public class B {} - """) - ) - ); - - Path jar2 = dirWithJar.resolve("c_d.jar"); - JarUtils.createJarWithEntries( - jar2, Map.of( - "p/C.class", InMemoryJavaCompiler.compile( - "p.C", """ - package p; - public class C {} - """), - "p/D.class", InMemoryJavaCompiler.compile( - "p.D", """ - package p; - public class D {} - """) - ) - ); - - InMemoryJavaCompiler.compile( - "p.E", """ - package p; - public class E {} - """ - ); - Files.write( - tmp.resolve("E.class"), InMemoryJavaCompiler.compile( - "p.E", """ - package p; - public class E {} - """) - ); - - - then: - try (Stream classReaderStream = ClassReaders.ofPaths(Stream.of(tmp, jar, jar2))) { - - List collect = classReaderStream.map(cr -> cr.getClassName()).collect(Collectors.toList()); - assertThat(collect, Matchers.containsInAnyOrder("p/A", "p/B", "p/C", "p/D", "p/E")); - } - } - - def "classes from multiple jars in a dir are returned"() { - when: - final Path tmp = tmpDir(); - final Path dirWithJar = tmp.resolve("jars-dir"); - Files.createDirectories(dirWithJar); - - - Path jar = 
dirWithJar.resolve("a_b.jar"); - JarUtils.createJarWithEntries( - jar, Map.of( - "p/A.class", InMemoryJavaCompiler.compile( - "p.A", """ - package p; - public class A {} - """), - "p/B.class", InMemoryJavaCompiler.compile( - "p.B", """ - package p; - public class B {} - """) - ) - ); - - Path jar2 = dirWithJar.resolve("c_d.jar"); - JarUtils.createJarWithEntries( - jar2, Map.of( - "p/C.class", InMemoryJavaCompiler.compile( - "p.C", """ - package p; - public class C {} - """), - "p/D.class", InMemoryJavaCompiler.compile( - "p.D", """ - package p; - public class D {} - """) - ) - ); - - then: - try (Stream classReaderStream = ClassReaders.ofDirWithJars(dirWithJar.toString())) { - List collect = classReaderStream.map(cr -> cr.getClassName()).collect(Collectors.toList()); - assertThat(collect, Matchers.containsInAnyOrder("p/A", "p/B", "p/C", "p/D")); - } - } -} diff --git a/build-tools/src/test/groovy/org/elasticsearch/gradle/plugin/scanner/ClassScannerSpec.groovy b/build-tools/src/test/groovy/org/elasticsearch/gradle/plugin/scanner/ClassScannerSpec.groovy deleted file mode 100644 index d03ae28576ae..000000000000 --- a/build-tools/src/test/groovy/org/elasticsearch/gradle/plugin/scanner/ClassScannerSpec.groovy +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.gradle.plugin.scanner - -import spock.lang.Specification - -import org.elasticsearch.plugin.api.Extensible -import org.hamcrest.Matchers -import org.objectweb.asm.ClassReader -import org.objectweb.asm.Type - -import java.nio.file.Paths -import java.util.stream.Collectors -import java.util.stream.Stream - -import static org.hamcrest.MatcherAssert.assertThat - -class ClassScannerSpec extends Specification { - static final System.Logger logger = System.getLogger(ClassScannerSpec.class.getName()) - def "class and interface hierarchy is scanned"() { - given: - def reader = new ClassScanner( - Type.getDescriptor(Extensible.class), (classname, map) -> { - map.put(classname, classname) - return null - } - ) - Stream classReaderStream = ofClassPath() - logger.log(System.Logger.Level.INFO, "classReaderStream size "+ofClassPath().collect(Collectors.toList()).size()) - - when: - reader.visit(classReaderStream); - Map extensibleClasses = reader.getFoundClasses() - - then: - assertThat( - extensibleClasses, - Matchers.allOf( - Matchers.hasEntry( - "org/elasticsearch/plugin/scanner/test_classes/ExtensibleClass", - "org/elasticsearch/plugin/scanner/test_classes/ExtensibleClass" - ), - Matchers.hasEntry( - "org/elasticsearch/plugin/scanner/test_classes/ImplementingExtensible", - "org/elasticsearch/plugin/scanner/test_classes/ExtensibleInterface" - ), - Matchers.hasEntry( - "org/elasticsearch/plugin/scanner/test_classes/SubClass", - "org/elasticsearch/plugin/scanner/test_classes/ExtensibleClass" - ) - ) - ); - } - - static Stream ofClassPath() throws IOException { - String classpath = System.getProperty("java.class.path"); - logger.log(System.Logger.Level.INFO, "classpath "+classpath); - return ofClassPath(classpath); - } - - static Stream ofClassPath(String classpath) { - if (classpath != null && classpath.equals("") == false) {// todo when do we set cp to "" ? 
- def classpathSeparator = System.getProperty("path.separator") - logger.log(System.Logger.Level.INFO, "classpathSeparator "+classpathSeparator); - - String[] pathelements = classpath.split(classpathSeparator); - return ClassReaders.ofPaths(Arrays.stream(pathelements).map(Paths::get)); - } - return Stream.empty(); - } - -} diff --git a/build-tools/src/test/groovy/org/elasticsearch/gradle/plugin/scanner/NamedComponentScannerSpec.groovy b/build-tools/src/test/groovy/org/elasticsearch/gradle/plugin/scanner/NamedComponentScannerSpec.groovy deleted file mode 100644 index fc48ae6a0ad6..000000000000 --- a/build-tools/src/test/groovy/org/elasticsearch/gradle/plugin/scanner/NamedComponentScannerSpec.groovy +++ /dev/null @@ -1,216 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.plugin.scanner - -import net.bytebuddy.ByteBuddy -import net.bytebuddy.dynamic.DynamicType -import spock.lang.Specification - -import org.elasticsearch.gradle.internal.test.InMemoryJavaCompiler -import org.elasticsearch.gradle.internal.test.JarUtils -import org.elasticsearch.gradle.internal.test.StableApiJarMocks -import org.elasticsearch.gradle.plugin.scanner.ClassReaders -import org.elasticsearch.gradle.plugin.scanner.NamedComponentScanner -import org.elasticsearch.plugin.scanner.test_classes.ExtensibleClass -import org.elasticsearch.plugin.scanner.test_classes.ExtensibleInterface -import org.elasticsearch.plugin.scanner.test_classes.TestNamedComponent -import org.elasticsearch.plugin.api.Extensible -import org.elasticsearch.plugin.api.NamedComponent -import org.junit.Rule -import org.junit.rules.TemporaryFolder -import org.objectweb.asm.ClassReader - -import java.nio.file.Files -import java.nio.file.Path -import java.nio.file.Paths -import java.util.stream.Collectors - -import static org.hamcrest.MatcherAssert.assertThat -import static org.hamcrest.Matchers.equalTo - -class NamedComponentScannerSpec extends Specification { - @Rule - TemporaryFolder testProjectDir = new TemporaryFolder() - - private Path tmpDir() throws IOException { - return testProjectDir.root.toPath(); - } - - NamedComponentScanner namedComponentScanner = new NamedComponentScanner(); - - def "named component is found when single class provided"() { - when: - Map> namedComponents = namedComponentScanner.scanForNamedClasses( - classReaderStream(TestNamedComponent.class, ExtensibleInterface.class) - ) - - then: - assertThat( - namedComponents, - equalTo( - Map.of( - ExtensibleInterface.class.getCanonicalName(), - Map.of("test_named_component", TestNamedComponent.class.getCanonicalName()) - ) - ) - ) - - } - - def "named components are found when single jar provided"() { - given: - final Path tmp = tmpDir(); - final Path dirWithJar = tmp.resolve("jars-dir"); - Files.createDirectories(dirWithJar); - Path jar = dirWithJar.resolve("plugin.jar"); - JarUtils.createJarWithEntries( - jar, Map.of( - "p/A.class", InMemoryJavaCompiler.compile( - "p.A", """ - package p; - import org.elasticsearch.plugin.api.*; - import org.elasticsearch.plugin.scanner.test_classes.*; - @NamedComponent("a_component") - public class A extends ExtensibleClass {} - """ - ), "p/B.class", InMemoryJavaCompiler.compile( - "p.B", """ - package p; - 
import org.elasticsearch.plugin.api.*; - import org.elasticsearch.plugin.scanner.test_classes.*; - @NamedComponent("b_component") - public class B implements ExtensibleInterface{} - """ - ) - ) - ); - StableApiJarMocks.createPluginApiJar(dirWithJar); - StableApiJarMocks.createExtensibleApiJar(dirWithJar);//for instance analysis api - - - Collection classReaderStream = ClassReaders.ofDirWithJars(dirWithJar.toString()).collect(Collectors.toList()) - - when: - Map> namedComponents = namedComponentScanner.scanForNamedClasses(classReaderStream); - - then: - assertThat( - namedComponents, - equalTo( - Map.of( - ExtensibleClass.class.getCanonicalName(), - Map.of("a_component", "p.A"), - ExtensibleInterface.class.getCanonicalName(), - Map.of("b_component", "p.B") - ) - ) - ); - } - - def "named components can extend common super class"() { - given: - Map sources = Map.of( - "p.CustomExtensibleInterface", - """ - package p; - import org.elasticsearch.plugin.api.*; - import org.elasticsearch.plugin.scanner.test_classes.*; - public interface CustomExtensibleInterface extends ExtensibleInterface {} - """, - // note that this class implements a custom interface - "p.CustomExtensibleClass", - """ - package p; - import org.elasticsearch.plugin.api.*; - import org.elasticsearch.plugin.scanner.test_classes.*; - public class CustomExtensibleClass implements CustomExtensibleInterface {} - """, - "p.A", - """ - package p; - import org.elasticsearch.plugin.api.*; - import org.elasticsearch.plugin.scanner.test_classes.*; - @NamedComponent("a_component") - public class A extends CustomExtensibleClass {} - """, - "p.B", - """ - package p; - import org.elasticsearch.plugin.api.*; - import org.elasticsearch.plugin.scanner.test_classes.*; - @NamedComponent("b_component") - public class B implements CustomExtensibleInterface{} - """ - ); - var classToBytes = InMemoryJavaCompiler.compile(sources); - - Map jarEntries = new HashMap<>(); - jarEntries.put("p/CustomExtensibleInterface.class", classToBytes.get("p.CustomExtensibleInterface")); - jarEntries.put("p/CustomExtensibleClass.class", classToBytes.get("p.CustomExtensibleClass")); - jarEntries.put("p/A.class", classToBytes.get("p.A")); - jarEntries.put("p/B.class", classToBytes.get("p.B")); - - final Path tmp = tmpDir(); - final Path dirWithJar = tmp.resolve("jars-dir"); - Files.createDirectories(dirWithJar); - Path jar = dirWithJar.resolve("plugin.jar"); - JarUtils.createJarWithEntries(jar, jarEntries); - - StableApiJarMocks.createPluginApiJar(dirWithJar) - StableApiJarMocks.createExtensibleApiJar(dirWithJar);//for instance analysis api - - Collection classReaderStream = ClassReaders.ofDirWithJars(dirWithJar.toString()).collect(Collectors.toList()) - - when: - Map> namedComponents = namedComponentScanner.scanForNamedClasses(classReaderStream); - - then: - assertThat( - namedComponents, - equalTo( - Map.of( - ExtensibleInterface.class.getCanonicalName(), - Map.of( - "a_component", "p.A", - "b_component", "p.B" - ) - ) - ) - ); - } - - - - private Collection classReaderStream(Class... 
classes) { - try { - return Arrays.stream(classes).map( - clazz -> { - String className = classNameToPath(clazz) + ".class"; - def stream = this.getClass().getClassLoader().getResourceAsStream(className) - try (InputStream is = stream) { - byte[] classBytes = is.readAllBytes(); - ClassReader classReader = new ClassReader(classBytes); - return classReader; - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - ).collect(Collectors.toList()) - } catch (Exception e) { - throw new RuntimeException(e); - } - - } - - private String classNameToPath(Class clazz) { - return clazz.getCanonicalName().replace(".", "/"); - } - - -} diff --git a/build-tools/src/test/groovy/org/elasticsearch/gradle/testclusters/SslTrustResolverSpec.groovy b/build-tools/src/test/groovy/org/elasticsearch/gradle/testclusters/SslTrustResolverSpec.groovy new file mode 100644 index 000000000000..f1a87cd9ff26 --- /dev/null +++ b/build-tools/src/test/groovy/org/elasticsearch/gradle/testclusters/SslTrustResolverSpec.groovy @@ -0,0 +1,117 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.testclusters + +import spock.lang.Specification + +import java.nio.file.Paths +import java.security.cert.Certificate +import java.security.cert.CertificateException +import java.security.cert.CertificateFactory +import javax.net.ssl.TrustManager +import javax.net.ssl.X509TrustManager + +class SslTrustResolverSpec extends Specification { + + def "build trust manager from trust store file"() { + given: + SslTrustResolver resolver = new SslTrustResolver() + URL ca = getClass().getResource("/ca.p12") + + when: + resolver.setTrustStoreFile(Paths.get(ca.toURI()).toFile()) + resolver.setTrustStorePassword("password") + final TrustManager[] trustManagers = resolver.buildTrustManagers() + + then: + trustManagers.length == 1 + trustManagers[0] instanceof X509TrustManager + def issuers = ((X509TrustManager) trustManagers[0]).getAcceptedIssuers() + issuers.length == 1 + issuers[0].subjectX500Principal.toString() == 'CN=Elastic Certificate Tool Autogenerated CA' + } + + def "build trust manager from certificate authorities file"() { + given: + SslTrustResolver resolver = new SslTrustResolver() + URL ca = getClass().getResource("/ca.pem") + + when: + resolver.setCertificateAuthorities(Paths.get(ca.toURI()).toFile()) + final TrustManager[] trustManagers = resolver.buildTrustManagers() + + then: + trustManagers.length == 1 + trustManagers[0] instanceof X509TrustManager + def issuers = ((X509TrustManager) trustManagers[0]).getAcceptedIssuers() + issuers.length == 1 + issuers[0].subjectX500Principal.toString() == 'CN=Elastic Certificate Tool Autogenerated CA' + } + + def "build trust manager from keystore file"() { + given: + SslTrustResolver resolver = new SslTrustResolver() + URL ks = getClass().getResource("/server.p12") + Certificate[] serverChain = readCertificates("/server.chain") + Certificate[] issuingChain = readCertificates("/issuing.pem") + Certificate[] altChain = readCertificates("/ca.pem") + + when: + resolver.setServerKeystoreFile(Paths.get(ks.toURI()).toFile()) + resolver.setServerKeystorePassword("password") + final TrustManager[] trustManagers = resolver.buildTrustManagers() + + then: 
+ trustManagers.length == 1 + trustManagers[0] instanceof X509TrustManager + + def trustManager = (X509TrustManager) trustManagers[0] + isTrusted(trustManager, serverChain) == true; + isTrusted(trustManager, issuingChain) == false; + isTrusted(trustManager, altChain) == false; + } + + def "build trust manager from server certificate file"() { + given: + SslTrustResolver resolver = new SslTrustResolver() + URL chain = getClass().getResource("/server.chain") + Certificate[] serverChain = readCertificates("/server.chain") + Certificate[] issuingChain = readCertificates("/issuing.pem") + Certificate[] altChain = readCertificates("/ca.pem") + + when: + resolver.setServerCertificate(Paths.get(chain.toURI()).toFile()) + final TrustManager[] trustManagers = resolver.buildTrustManagers() + + then: + trustManagers.length == 1 + trustManagers[0] instanceof X509TrustManager + + def trustManager = (X509TrustManager) trustManagers[0] + isTrusted(trustManager, serverChain) == true; + isTrusted(trustManager, issuingChain) == false; + isTrusted(trustManager, altChain) == false; + } + + private Certificate[] readCertificates(String resourceName) { + CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509") + return getClass().getResource(resourceName).withInputStream { stream -> + certificateFactory.generateCertificates(stream) + } + } + + private boolean isTrusted(X509TrustManager trustManager, Certificate[] certificateChain) { + try { + trustManager.checkServerTrusted(((java.security.cert.X509Certificate[]) certificateChain), "RSA"); + return true; + } catch(CertificateException ignore) { + return false; + } + } +} diff --git a/build-tools/src/test/groovy/org/elasticsearch/gradle/testclusters/WaitForHttpResourceSpec.groovy b/build-tools/src/test/groovy/org/elasticsearch/gradle/testclusters/WaitForHttpResourceSpec.groovy deleted file mode 100644 index ffa8b569e3e9..000000000000 --- a/build-tools/src/test/groovy/org/elasticsearch/gradle/testclusters/WaitForHttpResourceSpec.groovy +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.gradle.testclusters - -import spock.lang.Specification - -import java.nio.file.Paths -import java.security.KeyStore -import java.security.cert.Certificate -import java.security.cert.X509Certificate - -class WaitForHttpResourceSpec extends Specification { - - def "build trust store from trust store file"() { - given: - WaitForHttpResource http = new WaitForHttpResource(new URL("https://localhost/")) - URL ca = getClass().getResource("/ca.p12") - - when: - http.setTrustStoreFile(Paths.get(ca.toURI()).toFile()) - http.setTrustStorePassword("password") - final KeyStore store = http.buildTrustStore() - final Certificate certificate = store.getCertificate("ca") - - then: - certificate != null - certificate instanceof X509Certificate - certificate.subjectX500Principal.toString() == 'CN=Elastic Certificate Tool Autogenerated CA' - } - - def "build trust store from certificate authorities file"() { - given: - WaitForHttpResource http = new WaitForHttpResource(new URL("https://localhost/")) - URL ca = getClass().getResource("/ca.pem") - - when: - http.setCertificateAuthorities(Paths.get(ca.toURI()).toFile()) - KeyStore store = http.buildTrustStore() - Certificate certificate = store.getCertificate("cert-0") - - then: - certificate != null - certificate instanceof X509Certificate - certificate.subjectX500Principal.toString() == "CN=Elastic Certificate Tool Autogenerated CA" - } -} diff --git a/build-tools/src/test/resources/issuing.p12 b/build-tools/src/test/resources/issuing.p12 new file mode 100644 index 000000000000..85118080aacd Binary files /dev/null and b/build-tools/src/test/resources/issuing.p12 differ diff --git a/build-tools/src/test/resources/issuing.pem b/build-tools/src/test/resources/issuing.pem new file mode 100644 index 000000000000..49d1f9d95052 --- /dev/null +++ b/build-tools/src/test/resources/issuing.pem @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIICBjCCAW+gAwIBAgIUY8ypR5RTcCCoHom3Mvq3Cl4MWggwDQYJKoZIhvcNAQEL +BQAwFTETMBEGA1UEAxMKSXNzdWluZyBDQTAeFw0yMzAxMDQwNjE3MDlaFw0zNjA5 +MTIwNjE3MDlaMBUxEzARBgNVBAMTCklzc3VpbmcgQ0EwgZ8wDQYJKoZIhvcNAQEB +BQADgY0AMIGJAoGBAKta2elMeFOjCoMaaT3XiQh4bcs8FfG2x96puWu8vy184qBp +26JDizlpJROx8WmE1hZEoexlagltYZ9bQrNyxrXDFbFw6Ccg+0o8RZmLzJE+v5Th +vE2ezzGcDDlb+gvFKTmdTiunI1G6y+5qe03YTtJniT5wOSUMOahRf2qc4p/DAgMB +AAGjUzBRMB0GA1UdDgQWBBSC0hVfH3oNVV5ZPM0fxfMCfVTK0DAfBgNVHSMEGDAW +gBSC0hVfH3oNVV5ZPM0fxfMCfVTK0DAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4GBAFKZ7KV9XP5Q5baRk3wgvpejBEpLf438icvpxU++WU8neX1vc9qK +ZNbivyaVG1JthobV9A/lS9pNSlJtGvTjpp8Aq2uM0V2LDaLeNQ/sMrKWpsvYZkKU +kIkbrBgUHm/zHfuhRhS3X1siKa7muH+T3bH//Vc3x6lO8nfX1iz6XHcy +-----END CERTIFICATE----- diff --git a/build-tools/src/test/resources/server.chain b/build-tools/src/test/resources/server.chain new file mode 100644 index 000000000000..07da615d580a --- /dev/null +++ b/build-tools/src/test/resources/server.chain @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE----- +MIICDjCCAXegAwIBAgIVAO17tpEpOGKg8EH4BIKpYMlPkwtLMA0GCSqGSIb3DQEB +CwUAMBUxEzARBgNVBAMTCklzc3VpbmcgQ0EwHhcNMjMwMTA0MDYxOTMyWhcNMzYw +OTEyMDYxOTMyWjARMQ8wDQYDVQQDEwZzZXJ2ZXIwgZ8wDQYJKoZIhvcNAQEBBQAD +gY0AMIGJAoGBAMQkTJQP3ust+YERFxqjm6Ck+w4ql9Dc7L87mkdJhY7kVrv8hVb5 +UB+UjNYpO/hBKKE3sAqenuni/XfNRHbmVq+c3xNgJxkr9JGOzIf76TBDEORlVe99 +uViqQb+WwGcO+qY7NN5WcNKd8W779Y6CENPlMiyRzdEmgf4AnwmxOTs7AgMBAAGj +XjBcMB0GA1UdDgQWBBRj8cuUKTNtKJvhy7mMganBGg5RCjAfBgNVHSMEGDAWgBSC +0hVfH3oNVV5ZPM0fxfMCfVTK0DAPBgNVHREECDAGhwR/AAABMAkGA1UdEwQCMAAw +DQYJKoZIhvcNAQELBQADgYEASox5Lk7otOz85+G0qQOJyortpOTyxZgJJ5RI3RO5 
+7d59vMabbktdj+750fL50G07oeACip6kyglQqAE+I6UPQDepTpvoEv3OxDvilehX +HCq2f0rKFnFB8ueT9Qx6tzAz1Luz764KU4AzP4rBcJicMZW1cmGsXD3/TDDEE+qJ +YEY= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICBjCCAW+gAwIBAgIUY8ypR5RTcCCoHom3Mvq3Cl4MWggwDQYJKoZIhvcNAQEL +BQAwFTETMBEGA1UEAxMKSXNzdWluZyBDQTAeFw0yMzAxMDQwNjE3MDlaFw0zNjA5 +MTIwNjE3MDlaMBUxEzARBgNVBAMTCklzc3VpbmcgQ0EwgZ8wDQYJKoZIhvcNAQEB +BQADgY0AMIGJAoGBAKta2elMeFOjCoMaaT3XiQh4bcs8FfG2x96puWu8vy184qBp +26JDizlpJROx8WmE1hZEoexlagltYZ9bQrNyxrXDFbFw6Ccg+0o8RZmLzJE+v5Th +vE2ezzGcDDlb+gvFKTmdTiunI1G6y+5qe03YTtJniT5wOSUMOahRf2qc4p/DAgMB +AAGjUzBRMB0GA1UdDgQWBBSC0hVfH3oNVV5ZPM0fxfMCfVTK0DAfBgNVHSMEGDAW +gBSC0hVfH3oNVV5ZPM0fxfMCfVTK0DAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4GBAFKZ7KV9XP5Q5baRk3wgvpejBEpLf438icvpxU++WU8neX1vc9qK +ZNbivyaVG1JthobV9A/lS9pNSlJtGvTjpp8Aq2uM0V2LDaLeNQ/sMrKWpsvYZkKU +kIkbrBgUHm/zHfuhRhS3X1siKa7muH+T3bH//Vc3x6lO8nfX1iz6XHcy +-----END CERTIFICATE----- diff --git a/build-tools/src/test/resources/server.p12 b/build-tools/src/test/resources/server.p12 new file mode 100644 index 000000000000..95674e0fe78e Binary files /dev/null and b/build-tools/src/test/resources/server.p12 differ diff --git a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/TestClasspathUtils.groovy b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/TestClasspathUtils.groovy index 94e012c95021..e41ace83b75b 100644 --- a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/TestClasspathUtils.groovy +++ b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/TestClasspathUtils.groovy @@ -16,17 +16,40 @@ import net.bytebuddy.dynamic.DynamicType import net.bytebuddy.implementation.ExceptionMethod import net.bytebuddy.implementation.FixedValue import net.bytebuddy.implementation.Implementation +import net.bytebuddy.implementation.MethodDelegation + +import org.elasticsearch.gradle.NamedComponentScannerMock import static org.junit.Assert.fail class TestClasspathUtils { + // we cannot access real NamedComponentScanner in libs:plugin-api-scanner so we create a fake class + static void setupNamedComponentScanner(File projectRoot, String version) { + def value = MethodDelegation.to(NamedComponentScannerMock.class) + + DynamicType.Unloaded dynamicType = new ByteBuddy().subclass(Object.class) + .name("org.elasticsearch.plugin.scanner.NamedComponentScanner") + .defineMethod("main", void.class, Visibility.PUBLIC, Ownership.STATIC) + .withParameters(String[].class) + .intercept(value) + .make() + .include(new ByteBuddy().redefine(NamedComponentScannerMock.class).make()) + + try { + dynamicType.toJar(targetFile(projectRoot, "elasticsearch-plugin-scanner", version)) + } catch (IOException e) { + e.printStackTrace() + fail("Cannot setup jdk jar hell classpath") + } + } + static void setupJarHellJar(File projectRoot) { - generateJdkJarHellCheck(projectRoot, "org.elasticsearch.jdk.JarHell", "current", FixedValue.value(TypeDescription.VOID)) + generateJarWithClass(projectRoot, "org.elasticsearch.jdk.JarHell","elasticsearch-core", "current", FixedValue.value(TypeDescription.VOID)) } static void setupJarHellJar(File projectRoot, String version) { - generateJdkJarHellCheck(projectRoot, "org.elasticsearch.jdk.JarHell", version, FixedValue.value(TypeDescription.VOID)) + generateJarWithClass(projectRoot, "org.elasticsearch.jdk.JarHell", "elasticsearch-core", version, FixedValue.value(TypeDescription.VOID)) } static void setupJarJdkClasspath(File projectRoot) { @@ -39,26 +62,28 @@ class TestClasspathUtils 
{ } private static void generateJdkJarHellCheck(File targetDir, String className, Implementation mainImplementation) { - generateJdkJarHellCheck(targetDir, className, "current", mainImplementation) + generateJarWithClass(targetDir, className, "elasticsearch-core", "current", mainImplementation) } - private static void generateJdkJarHellCheck(File targetDir, String className, String version, Implementation mainImplementation) { + + private static void generateJarWithClass(File targetDir, String className, String artifactName, String version, Implementation mainImplementation) { DynamicType.Unloaded dynamicType = new ByteBuddy().subclass(Object.class) .name(className) .defineMethod("main", void.class, Visibility.PUBLIC, Ownership.STATIC) .withParameters(String[].class) .intercept(mainImplementation) .make() + try { - dynamicType.toJar(targetFile(targetDir, version)) + dynamicType.toJar(targetFile(targetDir, artifactName, version)) } catch (IOException e) { e.printStackTrace() fail("Cannot setup jdk jar hell classpath") } } - private static File targetFile(File projectRoot, String version) { - File targetFile = new File(projectRoot, "elasticsearch-core-${version}.jar") + private static File targetFile(File projectRoot, String artifactName, String version) { + File targetFile = new File(projectRoot, "${artifactName}-${version}.jar") println "targetFile = $targetFile" targetFile.getParentFile().mkdirs() diff --git a/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/StableApiJarMocks.java b/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/StableApiJarMocks.java index d5d45807f062..a625fef61903 100644 --- a/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/StableApiJarMocks.java +++ b/build-tools/src/testFixtures/java/org/elasticsearch/gradle/internal/test/StableApiJarMocks.java @@ -11,8 +11,8 @@ import net.bytebuddy.ByteBuddy; import net.bytebuddy.dynamic.DynamicType; -import org.elasticsearch.plugin.api.Extensible; -import org.elasticsearch.plugin.api.NamedComponent; +import org.elasticsearch.plugin.Extensible; +import org.elasticsearch.plugin.NamedComponent; import org.elasticsearch.plugin.scanner.test_classes.ExtensibleClass; import org.elasticsearch.plugin.scanner.test_classes.ExtensibleInterface; diff --git a/build-tools/src/testFixtures/java/org/elasticsearch/plugin/Extensible.java b/build-tools/src/testFixtures/java/org/elasticsearch/plugin/Extensible.java new file mode 100644 index 000000000000..34984b90d3c7 --- /dev/null +++ b/build-tools/src/testFixtures/java/org/elasticsearch/plugin/Extensible.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.plugin; + +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +import static java.lang.annotation.ElementType.TYPE; + +@Retention(RetentionPolicy.RUNTIME) +@Target(value = { TYPE }) +public @interface Extensible { +} diff --git a/build-tools/src/testFixtures/java/org/elasticsearch/plugin/NamedComponent.java b/build-tools/src/testFixtures/java/org/elasticsearch/plugin/NamedComponent.java new file mode 100644 index 000000000000..53b195313ecc --- /dev/null +++ b/build-tools/src/testFixtures/java/org/elasticsearch/plugin/NamedComponent.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.plugin; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +@Retention(RetentionPolicy.RUNTIME) +@Target({ ElementType.TYPE }) +public @interface NamedComponent { + /** + * The name used for registration and lookup + * @return a name + */ + String value(); +} diff --git a/build-tools/src/testFixtures/java/org/elasticsearch/plugin/api/Extensible.java b/build-tools/src/testFixtures/java/org/elasticsearch/plugin/api/Extensible.java deleted file mode 100644 index 1a1f10f11c3e..000000000000 --- a/build-tools/src/testFixtures/java/org/elasticsearch/plugin/api/Extensible.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.plugin.api; - -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -import static java.lang.annotation.ElementType.TYPE; - -@Retention(RetentionPolicy.RUNTIME) -@Target(value = { TYPE }) -public @interface Extensible { -} diff --git a/build-tools/src/testFixtures/java/org/elasticsearch/plugin/api/NamedComponent.java b/build-tools/src/testFixtures/java/org/elasticsearch/plugin/api/NamedComponent.java deleted file mode 100644 index 65a1a0d46abd..000000000000 --- a/build-tools/src/testFixtures/java/org/elasticsearch/plugin/api/NamedComponent.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
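A hedged illustration of how the relocated fixture annotations are meant to compose: an extension point carries @Extensible and a concrete implementation registers its lookup name via @NamedComponent, as TestNamedComponent does further down in this diff. The interface and class names here are invented.

import org.elasticsearch.plugin.Extensible;
import org.elasticsearch.plugin.NamedComponent;

@Extensible
interface ExampleAnalysisComponent {          // invented extension point
    String process(String input);
}

@NamedComponent("example_component")          // the name used for registration and lookup
class ExampleComponent implements ExampleAnalysisComponent {
    @Override
    public String process(String input) {
        return input.trim();                  // trivial body, only to make the component concrete
    }
}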
- */ - -package org.elasticsearch.plugin.api; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -@Retention(RetentionPolicy.RUNTIME) -@Target({ ElementType.TYPE }) -public @interface NamedComponent { - /** - * The name used for registration and lookup - * @return a name - */ - String value(); -} diff --git a/build-tools/src/testFixtures/java/org/elasticsearch/plugin/scanner/test_classes/ExtensibleClass.java b/build-tools/src/testFixtures/java/org/elasticsearch/plugin/scanner/test_classes/ExtensibleClass.java index b76f6dd93f9d..50ea17993d7c 100644 --- a/build-tools/src/testFixtures/java/org/elasticsearch/plugin/scanner/test_classes/ExtensibleClass.java +++ b/build-tools/src/testFixtures/java/org/elasticsearch/plugin/scanner/test_classes/ExtensibleClass.java @@ -8,7 +8,7 @@ package org.elasticsearch.plugin.scanner.test_classes; -import org.elasticsearch.plugin.api.Extensible; +import org.elasticsearch.plugin.Extensible; @Extensible public class ExtensibleClass {} diff --git a/build-tools/src/testFixtures/java/org/elasticsearch/plugin/scanner/test_classes/ExtensibleInterface.java b/build-tools/src/testFixtures/java/org/elasticsearch/plugin/scanner/test_classes/ExtensibleInterface.java index c630bf216883..784136bf6f5a 100644 --- a/build-tools/src/testFixtures/java/org/elasticsearch/plugin/scanner/test_classes/ExtensibleInterface.java +++ b/build-tools/src/testFixtures/java/org/elasticsearch/plugin/scanner/test_classes/ExtensibleInterface.java @@ -8,7 +8,7 @@ package org.elasticsearch.plugin.scanner.test_classes; -import org.elasticsearch.plugin.api.Extensible; +import org.elasticsearch.plugin.Extensible; @Extensible public interface ExtensibleInterface {} diff --git a/build-tools/src/testFixtures/java/org/elasticsearch/plugin/scanner/test_classes/TestNamedComponent.java b/build-tools/src/testFixtures/java/org/elasticsearch/plugin/scanner/test_classes/TestNamedComponent.java index a98e6b08a797..9a6b9b648e3a 100644 --- a/build-tools/src/testFixtures/java/org/elasticsearch/plugin/scanner/test_classes/TestNamedComponent.java +++ b/build-tools/src/testFixtures/java/org/elasticsearch/plugin/scanner/test_classes/TestNamedComponent.java @@ -8,7 +8,9 @@ package org.elasticsearch.plugin.scanner.test_classes; -@org.elasticsearch.plugin.api.NamedComponent("test_named_component") +import org.elasticsearch.plugin.NamedComponent; + +@NamedComponent("test_named_component") public class TestNamedComponent implements ExtensibleInterface { } diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index be2be6f0461e..500870fb593c 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -25,7 +25,6 @@ restResources { dependencies { api project(':modules:mapper-extras') api project(':modules:parent-join') - api project(':modules:aggs-matrix-stats') api project(':modules:rank-eval') api project(':modules:lang-mustache') api project(':modules:aggregations') diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/GetAliasesResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/GetAliasesResponse.java deleted file mode 100644 index c5a1d97b7edf..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/GetAliasesResponse.java +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.client; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cluster.metadata.AliasMetadata; -import org.elasticsearch.common.xcontent.StatusToXContentObject; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParser.Token; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; - -/** - * Response obtained from the get aliases API. - * The format is pretty horrible as it holds aliases, but at the same time errors can come back through the status and error fields. - * Such errors are mostly 404 - NOT FOUND for aliases that were specified but not found. In such case the client won't throw exception - * so it allows to retrieve the returned aliases, while at the same time checking if errors were returned. - * There's also the case where an exception is returned, like for instance an {@link org.elasticsearch.index.IndexNotFoundException}. - * We would usually throw such exception, but we configure the client to not throw for 404 to support the case above, hence we also not - * throw in case an index is not found, although it is a hard error that doesn't come back with aliases. 
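A short usage sketch of the behaviour the javadoc above describes: because the client is configured not to throw on 404, callers have to inspect the status, error and exception fields themselves before reading the aliases. Method names come from the class itself; the aliases map is assumed to be Map<String, Set<AliasMetadata>>, and the surrounding call site is hypothetical.

import org.elasticsearch.client.GetAliasesResponse;
import org.elasticsearch.cluster.metadata.AliasMetadata;
import org.elasticsearch.rest.RestStatus;

import java.util.Map;
import java.util.Set;

class GetAliasesHandlingSketch {
    static void handle(GetAliasesResponse response) {
        if (response.status() == RestStatus.OK) {
            for (Map.Entry<String, Set<AliasMetadata>> entry : response.getAliases().entrySet()) {
                System.out.println(entry.getKey() + " -> " + entry.getValue());
            }
        } else if (response.getException() != null) {
            throw response.getException();    // a hard error, e.g. IndexNotFoundException, came back instead of aliases
        } else {
            // partial failure: some requested aliases were missing, typically reported as a 404
            System.err.println(response.status() + ": " + response.getError());
        }
    }
}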
- */ -public class GetAliasesResponse implements StatusToXContentObject { - - private final RestStatus status; - private final String error; - private final ElasticsearchException exception; - - private final Map> aliases; - - GetAliasesResponse(RestStatus status, String error, Map> aliases) { - this.status = status; - this.error = error; - this.aliases = aliases; - this.exception = null; - } - - private GetAliasesResponse(RestStatus status, ElasticsearchException exception) { - this.status = status; - this.error = null; - this.aliases = Collections.emptyMap(); - this.exception = exception; - } - - @Override - public RestStatus status() { - return status; - } - - /** - * Return the possibly returned error, null otherwise - */ - public String getError() { - return error; - } - - /** - * Return the exception that may have been returned - */ - public ElasticsearchException getException() { - return exception; - } - - /** - * Return the requested aliases - */ - public Map> getAliases() { - return aliases; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - { - if (status != RestStatus.OK) { - builder.field("error", error); - builder.field("status", status.getStatus()); - } - - for (Map.Entry> entry : aliases.entrySet()) { - builder.startObject(entry.getKey()); - { - builder.startObject("aliases"); - { - for (final AliasMetadata alias : entry.getValue()) { - AliasMetadata.Builder.toXContent(alias, builder, ToXContent.EMPTY_PARAMS); - } - } - builder.endObject(); - } - builder.endObject(); - } - } - builder.endObject(); - return builder; - } - - /** - * Parse the get aliases response - */ - public static GetAliasesResponse fromXContent(XContentParser parser) throws IOException { - if (parser.currentToken() == null) { - parser.nextToken(); - } - ensureExpectedToken(Token.START_OBJECT, parser.currentToken(), parser); - Map> aliases = new HashMap<>(); - - String currentFieldName; - Token token; - String error = null; - ElasticsearchException exception = null; - RestStatus status = RestStatus.OK; - - while (parser.nextToken() != Token.END_OBJECT) { - if (parser.currentToken() == Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - - if ("status".equals(currentFieldName)) { - if ((token = parser.nextToken()) != Token.FIELD_NAME) { - ensureExpectedToken(Token.VALUE_NUMBER, token, parser); - status = RestStatus.fromCode(parser.intValue()); - } - } else if ("error".equals(currentFieldName)) { - token = parser.nextToken(); - if (token == Token.VALUE_STRING) { - error = parser.text(); - } else if (token == Token.START_OBJECT) { - parser.nextToken(); - exception = ElasticsearchException.innerFromXContent(parser, true); - } else if (token == Token.START_ARRAY) { - parser.skipChildren(); - } - } else { - String indexName = parser.currentName(); - if (parser.nextToken() == Token.START_OBJECT) { - Set parseInside = parseAliases(parser); - aliases.put(indexName, parseInside); - } - } - } - } - if (exception != null) { - assert error == null; - assert aliases.isEmpty(); - return new GetAliasesResponse(status, exception); - } - return new GetAliasesResponse(status, error, aliases); - } - - private static Set parseAliases(XContentParser parser) throws IOException { - Set aliases = new HashSet<>(); - Token token; - String currentFieldName = null; - while ((token = parser.nextToken()) != Token.END_OBJECT) { - if (token == Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == 
Token.START_OBJECT) { - if ("aliases".equals(currentFieldName)) { - while (parser.nextToken() != Token.END_OBJECT) { - AliasMetadata fromXContent = AliasMetadata.Builder.fromXContent(parser); - aliases.add(fromXContent); - } - } else { - parser.skipChildren(); - } - } else if (token == Token.START_ARRAY) { - parser.skipChildren(); - } - } - return aliases; - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/NodesResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/NodesResponse.java deleted file mode 100644 index a0c38498591f..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/NodesResponse.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.client; - -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; - -/** - * Base class for responses that are node responses. These responses always contain the cluster - * name and the {@link NodesResponseHeader}. - */ -public abstract class NodesResponse { - - private final NodesResponseHeader header; - private final String clusterName; - - protected NodesResponse(NodesResponseHeader header, String clusterName) { - this.header = header; - this.clusterName = clusterName; - } - - /** - * Get the cluster name associated with all of the nodes. - * - * @return Never {@code null}. - */ - public String getClusterName() { - return clusterName; - } - - /** - * Gets information about the number of total, successful and failed nodes the request was run on. - * Also includes exceptions if relevant. - */ - public NodesResponseHeader getHeader() { - return header; - } - - public static void declareCommonNodesResponseParsing(ConstructingObjectParser parser) { - parser.declareObject(ConstructingObjectParser.constructorArg(), NodesResponseHeader::fromXContent, new ParseField("_nodes")); - parser.declareString(ConstructingObjectParser.constructorArg(), new ParseField("cluster_name")); - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/NodesResponseHeader.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/NodesResponseHeader.java deleted file mode 100644 index e22326dc88fb..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/NodesResponseHeader.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.client; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.support.nodes.BaseNodesResponse; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.rest.action.RestActions; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.Objects; - -/** - * A utility class to parse the Nodes Header returned by - * {@link RestActions#buildNodesHeader(XContentBuilder, ToXContent.Params, BaseNodesResponse)}. - */ -public final class NodesResponseHeader { - - public static final ParseField TOTAL = new ParseField("total"); - public static final ParseField SUCCESSFUL = new ParseField("successful"); - public static final ParseField FAILED = new ParseField("failed"); - public static final ParseField FAILURES = new ParseField("failures"); - - @SuppressWarnings("unchecked") - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "nodes_response_header", - true, - (a) -> { - int i = 0; - int total = (Integer) a[i++]; - int successful = (Integer) a[i++]; - int failed = (Integer) a[i++]; - List failures = (List) a[i++]; - return new NodesResponseHeader(total, successful, failed, failures); - } - ); - - static { - PARSER.declareInt(ConstructingObjectParser.constructorArg(), TOTAL); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), SUCCESSFUL); - PARSER.declareInt(ConstructingObjectParser.constructorArg(), FAILED); - PARSER.declareObjectArray( - ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> ElasticsearchException.fromXContent(p), - FAILURES - ); - } - - private final int total; - private final int successful; - private final int failed; - private final List failures; - - public NodesResponseHeader(int total, int successful, int failed, @Nullable List failures) { - this.total = total; - this.successful = successful; - this.failed = failed; - this.failures = failures == null ? Collections.emptyList() : failures; - } - - public static NodesResponseHeader fromXContent(XContentParser parser, Void context) throws IOException { - return PARSER.parse(parser, context); - } - - /** the total number of nodes that the operation was carried on */ - public int getTotal() { - return total; - } - - /** the number of nodes that the operation has failed on */ - public int getFailed() { - return failed; - } - - /** the number of nodes that the operation was successful on */ - public int getSuccessful() { - return successful; - } - - /** - * Get the failed node exceptions. - * - * @return Never {@code null}. Can be empty. - */ - public List getFailures() { - return failures; - } - - /** - * Determine if there are any node failures in {@link #failures}. - * - * @return {@code true} if {@link #failures} contains at least 1 exception. 
- */ - public boolean hasFailures() { - return failures.isEmpty() == false; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - NodesResponseHeader that = (NodesResponseHeader) o; - return total == that.total && successful == that.successful && failed == that.failed && Objects.equals(failures, that.failures); - } - - @Override - public int hashCode() { - return Objects.hash(total, successful, failed, failures); - } - -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 1cba9261ec09..fca1e5d29efa 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -9,66 +9,33 @@ package org.elasticsearch.client; import org.apache.http.HttpEntity; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.apache.http.entity.ContentType; import org.apache.http.nio.entity.NByteArrayEntity; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; -import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; -import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; -import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.explain.ExplainRequest; -import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; -import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.ClearScrollRequest; -import org.elasticsearch.action.search.ClosePointInTimeRequest; -import org.elasticsearch.action.search.MultiSearchRequest; -import org.elasticsearch.action.search.OpenPointInTimeRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.client.core.CountRequest; -import org.elasticsearch.client.core.GetSourceRequest; -import org.elasticsearch.client.core.MultiTermVectorsRequest; -import org.elasticsearch.client.core.TermVectorsRequest; -import org.elasticsearch.client.internal.Requests; -import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.uid.Versions; -import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; 
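For context, a minimal sketch of the ConstructingObjectParser pattern the removed NodesResponseHeader relies on, using an invented three-field header; the declare/parse calls mirror the ones visible in the deleted code above.

import org.elasticsearch.xcontent.ConstructingObjectParser;
import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.XContentParser;

import java.io.IOException;

class HeaderParserSketch {
    record Header(int total, int successful, int failed) {}   // invented stand-in for the response header

    static final ConstructingObjectParser<Header, Void> PARSER = new ConstructingObjectParser<>(
        "header_sketch",
        true,                                                  // lenient about unknown fields, like the original
        args -> new Header((int) args[0], (int) args[1], (int) args[2])
    );

    static {
        PARSER.declareInt(ConstructingObjectParser.constructorArg(), new ParseField("total"));
        PARSER.declareInt(ConstructingObjectParser.constructorArg(), new ParseField("successful"));
        PARSER.declareInt(ConstructingObjectParser.constructorArg(), new ParseField("failed"));
    }

    static Header fromXContent(XContentParser parser) throws IOException {
        return PARSER.parse(parser, null);                     // rebuild the object from the declared constructor args
    }
}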
import org.elasticsearch.index.VersionType; -import org.elasticsearch.index.rankeval.RankEvalRequest; -import org.elasticsearch.index.reindex.AbstractBulkByScrollRequest; -import org.elasticsearch.index.reindex.DeleteByQueryRequest; -import org.elasticsearch.index.reindex.ReindexRequest; -import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.rest.action.search.RestSearchAction; -import org.elasticsearch.script.mustache.MultiSearchTemplateRequest; -import org.elasticsearch.script.mustache.SearchTemplateRequest; -import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.xcontent.DeprecationHandler; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; @@ -78,9 +45,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.nio.charset.Charset; -import java.util.Arrays; import java.util.HashMap; -import java.util.List; import java.util.Locale; import java.util.Map; import java.util.StringJoiner; @@ -92,27 +57,6 @@ private RequestConverters() { // Contains only status utility methods } - static Request delete(DeleteRequest deleteRequest) { - String endpoint = endpoint(deleteRequest.index(), deleteRequest.id()); - Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - - Params parameters = new Params(); - parameters.withRouting(deleteRequest.routing()); - parameters.withTimeout(deleteRequest.timeout()); - parameters.withVersion(deleteRequest.version()); - parameters.withVersionType(deleteRequest.versionType()); - parameters.withIfSeqNo(deleteRequest.ifSeqNo()); - parameters.withIfPrimaryTerm(deleteRequest.ifPrimaryTerm()); - parameters.withRefreshPolicy(deleteRequest.getRefreshPolicy()); - parameters.withWaitForActiveShards(deleteRequest.waitForActiveShards()); - request.addParameters(parameters.asMap()); - return request; - } - - static Request info() { - return new Request(HttpGet.METHOD_NAME, "/"); - } - static Request bulk(BulkRequest bulkRequest) throws IOException { Request request = new Request(HttpPost.METHOD_NAME, "/_bulk"); @@ -246,64 +190,6 @@ static Request bulk(BulkRequest bulkRequest) throws IOException { return request; } - static Request exists(GetRequest getRequest) { - return getStyleRequest(HttpHead.METHOD_NAME, getRequest); - } - - static Request get(GetRequest getRequest) { - return getStyleRequest(HttpGet.METHOD_NAME, getRequest); - } - - private static Request getStyleRequest(String method, GetRequest getRequest) { - Request request = new Request(method, endpoint(getRequest.index(), getRequest.id())); - - Params parameters = new Params(); - parameters.withPreference(getRequest.preference()); - parameters.withRouting(getRequest.routing()); - parameters.withRefresh(getRequest.refresh()); - parameters.withRealtime(getRequest.realtime()); - parameters.withStoredFields(getRequest.storedFields()); - parameters.withVersion(getRequest.version()); - parameters.withVersionType(getRequest.versionType()); - parameters.withFetchSourceContext(getRequest.fetchSourceContext()); - request.addParameters(parameters.asMap()); - return request; - } - - static Request sourceExists(GetSourceRequest getSourceRequest) { - return sourceRequest(getSourceRequest, HttpHead.METHOD_NAME); - } - - static Request 
getSource(GetSourceRequest getSourceRequest) { - return sourceRequest(getSourceRequest, HttpGet.METHOD_NAME); - } - - private static Request sourceRequest(GetSourceRequest getSourceRequest, String httpMethodName) { - Params parameters = new Params(); - parameters.withPreference(getSourceRequest.preference()); - parameters.withRouting(getSourceRequest.routing()); - parameters.withRefresh(getSourceRequest.refresh()); - parameters.withRealtime(getSourceRequest.realtime()); - parameters.withFetchSourceContext(getSourceRequest.fetchSourceContext()); - - String endpoint = endpoint(getSourceRequest.index(), "_source", getSourceRequest.id()); - Request request = new Request(httpMethodName, endpoint); - request.addParameters(parameters.asMap()); - return request; - } - - static Request multiGet(MultiGetRequest multiGetRequest) throws IOException { - Request request = new Request(HttpPost.METHOD_NAME, "/_mget"); - - Params parameters = new Params(); - parameters.withPreference(multiGetRequest.preference()); - parameters.withRealtime(multiGetRequest.realtime()); - parameters.withRefresh(multiGetRequest.refresh()); - request.addParameters(parameters.asMap()); - request.setEntity(createEntity(multiGetRequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - static Request index(IndexRequest indexRequest) { String method = Strings.hasLength(indexRequest.id()) ? HttpPut.METHOD_NAME : HttpPost.METHOD_NAME; @@ -335,57 +221,6 @@ static Request index(IndexRequest indexRequest) { return request; } - static Request ping() { - return new Request(HttpHead.METHOD_NAME, "/"); - } - - static Request update(UpdateRequest updateRequest) throws IOException { - String endpoint = endpoint(updateRequest.index(), "_update", updateRequest.id()); - Request request = new Request(HttpPost.METHOD_NAME, endpoint); - - Params parameters = new Params(); - parameters.withRouting(updateRequest.routing()); - parameters.withTimeout(updateRequest.timeout()); - parameters.withRefreshPolicy(updateRequest.getRefreshPolicy()); - parameters.withWaitForActiveShards(updateRequest.waitForActiveShards()); - parameters.withDocAsUpsert(updateRequest.docAsUpsert()); - parameters.withFetchSourceContext(updateRequest.fetchSource()); - parameters.withRetryOnConflict(updateRequest.retryOnConflict()); - parameters.withVersion(updateRequest.version()); - parameters.withVersionType(updateRequest.versionType()); - parameters.withRequireAlias(updateRequest.isRequireAlias()); - - // The Java API allows update requests with different content types - // set for the partial document and the upsert document. This client - // only accepts update requests that have the same content types set - // for both doc and upsert. 
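A hedged example of the constraint spelled out in the comment above and enforced by the content-type check just below: the partial document and the upsert document must be built with the same XContentType. Index, id and field names are placeholders.

import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.xcontent.XContentType;

class UpdateRequestSketch {
    static UpdateRequest sameContentType() {
        return new UpdateRequest("my-index", "doc-1")          // placeholder index and id
            .doc(XContentType.JSON, "counter", 1)              // partial document, built as JSON
            .upsert(XContentType.JSON, "counter", 0);          // upsert document must use the same content type here
    }
}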
- XContentType xContentType = null; - if (updateRequest.doc() != null) { - xContentType = updateRequest.doc().getContentType(); - } - if (updateRequest.upsertRequest() != null) { - XContentType upsertContentType = updateRequest.upsertRequest().getContentType(); - if ((xContentType != null) && (xContentType != upsertContentType)) { - throw new IllegalStateException( - "Update request cannot have different content types for doc [" - + xContentType - + "]" - + " and upsert [" - + upsertContentType - + "] documents" - ); - } else { - xContentType = upsertContentType; - } - } - if (xContentType == null) { - xContentType = Requests.INDEX_CONTENT_TYPE; - } - request.addParameters(parameters.asMap()); - request.setEntity(createEntity(updateRequest, xContentType)); - return request; - } - /** * Convert a {@linkplain SearchRequest} into a {@linkplain Request}. * @param searchRequest the request to convert @@ -439,326 +274,6 @@ static Request searchScroll(SearchScrollRequest searchScrollRequest) throws IOEx return request; } - static Request clearScroll(ClearScrollRequest clearScrollRequest) throws IOException { - Request request = new Request(HttpDelete.METHOD_NAME, "/_search/scroll"); - request.setEntity(createEntity(clearScrollRequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - static Request openPointInTime(OpenPointInTimeRequest openRequest) { - Request request = new Request(HttpPost.METHOD_NAME, endpoint(openRequest.indices(), "_pit")); - Params params = new Params(); - if (OpenPointInTimeRequest.DEFAULT_INDICES_OPTIONS.equals(openRequest.indicesOptions()) == false) { - params.withIndicesOptions(openRequest.indicesOptions()); - } - params.withRouting(openRequest.routing()); - params.withPreference(openRequest.preference()); - params.putParam("keep_alive", openRequest.keepAlive()); - request.addParameters(params.asMap()); - return request; - } - - static Request closePointInTime(ClosePointInTimeRequest closeRequest) throws IOException { - Request request = new Request(HttpDelete.METHOD_NAME, "/_pit"); - request.setEntity(createEntity(closeRequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOException { - Request request = new Request(HttpPost.METHOD_NAME, "/_msearch"); - - Params params = new Params(); - params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true"); - if (multiSearchRequest.maxConcurrentSearchRequests() != MultiSearchRequest.MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT) { - params.putParam("max_concurrent_searches", Integer.toString(multiSearchRequest.maxConcurrentSearchRequests())); - } - - XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent(); - byte[] source = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, xContent); - request.addParameters(params.asMap()); - request.setEntity(new NByteArrayEntity(source, createContentType(xContent.type()))); - return request; - } - - static Request searchTemplate(SearchTemplateRequest searchTemplateRequest) throws IOException { - Request request; - - if (searchTemplateRequest.isSimulate()) { - request = new Request(HttpGet.METHOD_NAME, "_render/template"); - } else { - SearchRequest searchRequest = searchTemplateRequest.getRequest(); - String endpoint = endpoint(searchRequest.indices(), "_search/template"); - request = new Request(HttpPost.METHOD_NAME, endpoint); - - Params params = new Params(); - addSearchRequestParams(params, searchRequest); - request.addParameters(params.asMap()); - } - - 
request.setEntity(createEntity(searchTemplateRequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - static Request multiSearchTemplate(MultiSearchTemplateRequest multiSearchTemplateRequest) throws IOException { - Request request = new Request(HttpPost.METHOD_NAME, "/_msearch/template"); - - Params params = new Params(); - params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true"); - if (multiSearchTemplateRequest.maxConcurrentSearchRequests() != MultiSearchRequest.MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT) { - params.putParam("max_concurrent_searches", Integer.toString(multiSearchTemplateRequest.maxConcurrentSearchRequests())); - } - request.addParameters(params.asMap()); - - XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent(); - byte[] source = MultiSearchTemplateRequest.writeMultiLineFormat(multiSearchTemplateRequest, xContent); - request.setEntity(new NByteArrayEntity(source, createContentType(xContent.type()))); - return request; - } - - static Request count(CountRequest countRequest) throws IOException { - Request request = new Request(HttpPost.METHOD_NAME, endpoint(countRequest.indices(), countRequest.types(), "_count")); - Params params = new Params(); - params.withRouting(countRequest.routing()); - params.withPreference(countRequest.preference()); - params.withIndicesOptions(countRequest.indicesOptions()); - if (countRequest.terminateAfter() != 0) { - params.withTerminateAfter(countRequest.terminateAfter()); - } - if (countRequest.minScore() != null) { - params.putParam("min_score", String.valueOf(countRequest.minScore())); - } - request.addParameters(params.asMap()); - request.setEntity(createEntity(countRequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - static Request explain(ExplainRequest explainRequest) throws IOException { - String endpoint = endpoint(explainRequest.index(), "_explain", explainRequest.id()); - Request request = new Request(HttpGet.METHOD_NAME, endpoint); - - Params params = new Params(); - params.withStoredFields(explainRequest.storedFields()); - params.withFetchSourceContext(explainRequest.fetchSourceContext()); - params.withRouting(explainRequest.routing()); - params.withPreference(explainRequest.preference()); - request.addParameters(params.asMap()); - request.setEntity(createEntity(explainRequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - static Request fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest) throws IOException { - String methodName = fieldCapabilitiesRequest.indexFilter() != null ? 
HttpPost.METHOD_NAME : HttpGet.METHOD_NAME; - Request request = new Request(methodName, endpoint(fieldCapabilitiesRequest.indices(), "_field_caps")); - - Params params = new Params(); - params.withFields(fieldCapabilitiesRequest.fields()); - if (FieldCapabilitiesRequest.DEFAULT_INDICES_OPTIONS.equals(fieldCapabilitiesRequest.indicesOptions()) == false) { - params.withIndicesOptions(fieldCapabilitiesRequest.indicesOptions()); - } - request.addParameters(params.asMap()); - if (fieldCapabilitiesRequest.indexFilter() != null) { - request.setEntity(createEntity(fieldCapabilitiesRequest, REQUEST_BODY_CONTENT_TYPE)); - } - return request; - } - - static Request rankEval(RankEvalRequest rankEvalRequest) throws IOException { - Request request = new Request(HttpGet.METHOD_NAME, endpoint(rankEvalRequest.indices(), Strings.EMPTY_ARRAY, "_rank_eval")); - - Params params = new Params(); - if (SearchRequest.DEFAULT_INDICES_OPTIONS.equals(rankEvalRequest.indicesOptions()) == false) { - params.withIndicesOptions(rankEvalRequest.indicesOptions()); - } - params.putParam("search_type", rankEvalRequest.searchType().name().toLowerCase(Locale.ROOT)); - request.addParameters(params.asMap()); - request.setEntity(createEntity(rankEvalRequest.getRankEvalSpec(), REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - static Request reindex(ReindexRequest reindexRequest) throws IOException { - return prepareReindexRequest(reindexRequest, true); - } - - static Request deleteByQuery(DeleteByQueryRequest deleteByQueryRequest) throws IOException { - return prepareDeleteByQueryRequest(deleteByQueryRequest, true); - } - - static Request updateByQuery(UpdateByQueryRequest updateByQueryRequest) throws IOException { - return prepareUpdateByQueryRequest(updateByQueryRequest, true); - } - - private static Request prepareReindexRequest(ReindexRequest reindexRequest, boolean waitForCompletion) throws IOException { - String endpoint = new EndpointBuilder().addPathPart("_reindex").build(); - Request request = new Request(HttpPost.METHOD_NAME, endpoint); - Params params = new Params().withWaitForCompletion(waitForCompletion) - .withRefresh(reindexRequest.isRefresh()) - .withTimeout(reindexRequest.getTimeout()) - .withWaitForActiveShards(reindexRequest.getWaitForActiveShards()) - .withRequestsPerSecond(reindexRequest.getRequestsPerSecond()) - .withSlices(reindexRequest.getSlices()) - .withRequireAlias(reindexRequest.getDestination().isRequireAlias()); - - if (reindexRequest.getScrollTime() != null) { - params.putParam("scroll", reindexRequest.getScrollTime()); - } - - request.addParameters(params.asMap()); - request.setEntity(createEntity(reindexRequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - private static Request prepareDeleteByQueryRequest(DeleteByQueryRequest deleteByQueryRequest, boolean waitForCompletion) - throws IOException { - String endpoint = endpoint(deleteByQueryRequest.indices(), "_delete_by_query"); - Request request = new Request(HttpPost.METHOD_NAME, endpoint); - Params params = new Params().withRouting(deleteByQueryRequest.getRouting()) - .withRefresh(deleteByQueryRequest.isRefresh()) - .withTimeout(deleteByQueryRequest.getTimeout()) - .withWaitForActiveShards(deleteByQueryRequest.getWaitForActiveShards()) - .withRequestsPerSecond(deleteByQueryRequest.getRequestsPerSecond()) - .withWaitForCompletion(waitForCompletion) - .withSlices(deleteByQueryRequest.getSlices()); - - if (SearchRequest.DEFAULT_INDICES_OPTIONS.equals(deleteByQueryRequest.indicesOptions()) == false) { - params = 
params.withIndicesOptions(deleteByQueryRequest.indicesOptions()); - } - - if (deleteByQueryRequest.isAbortOnVersionConflict() == false) { - params.putParam("conflicts", "proceed"); - } - if (deleteByQueryRequest.getBatchSize() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_SIZE) { - params.putParam("scroll_size", Integer.toString(deleteByQueryRequest.getBatchSize())); - } - if (deleteByQueryRequest.getScrollTime() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_TIMEOUT) { - params.putParam("scroll", deleteByQueryRequest.getScrollTime()); - } - if (deleteByQueryRequest.getMaxDocs() > 0) { - params.putParam("max_docs", Integer.toString(deleteByQueryRequest.getMaxDocs())); - } - request.addParameters(params.asMap()); - request.setEntity(createEntity(deleteByQueryRequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - static Request prepareUpdateByQueryRequest(UpdateByQueryRequest updateByQueryRequest, boolean waitForCompletion) throws IOException { - String endpoint = endpoint(updateByQueryRequest.indices(), "_update_by_query"); - Request request = new Request(HttpPost.METHOD_NAME, endpoint); - Params params = new Params().withRouting(updateByQueryRequest.getRouting()) - .withPipeline(updateByQueryRequest.getPipeline()) - .withRefresh(updateByQueryRequest.isRefresh()) - .withTimeout(updateByQueryRequest.getTimeout()) - .withWaitForActiveShards(updateByQueryRequest.getWaitForActiveShards()) - .withRequestsPerSecond(updateByQueryRequest.getRequestsPerSecond()) - .withWaitForCompletion(waitForCompletion) - .withSlices(updateByQueryRequest.getSlices()); - if (SearchRequest.DEFAULT_INDICES_OPTIONS.equals(updateByQueryRequest.indicesOptions()) == false) { - params = params.withIndicesOptions(updateByQueryRequest.indicesOptions()); - } - if (updateByQueryRequest.isAbortOnVersionConflict() == false) { - params.putParam("conflicts", "proceed"); - } - if (updateByQueryRequest.getBatchSize() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_SIZE) { - params.putParam("scroll_size", Integer.toString(updateByQueryRequest.getBatchSize())); - } - if (updateByQueryRequest.getScrollTime() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_TIMEOUT) { - params.putParam("scroll", updateByQueryRequest.getScrollTime()); - } - if (updateByQueryRequest.getMaxDocs() > 0) { - params.putParam("max_docs", Integer.toString(updateByQueryRequest.getMaxDocs())); - } - request.addParameters(params.asMap()); - request.setEntity(createEntity(updateByQueryRequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - static Request rethrottleReindex(RethrottleRequest rethrottleRequest) { - return rethrottle(rethrottleRequest, "_reindex"); - } - - static Request rethrottleUpdateByQuery(RethrottleRequest rethrottleRequest) { - return rethrottle(rethrottleRequest, "_update_by_query"); - } - - static Request rethrottleDeleteByQuery(RethrottleRequest rethrottleRequest) { - return rethrottle(rethrottleRequest, "_delete_by_query"); - } - - private static Request rethrottle(RethrottleRequest rethrottleRequest, String firstPathPart) { - String endpoint = new EndpointBuilder().addPathPart(firstPathPart) - .addPathPart(rethrottleRequest.getTaskId().toString()) - .addPathPart("_rethrottle") - .build(); - Request request = new Request(HttpPost.METHOD_NAME, endpoint); - Params params = new Params().withRequestsPerSecond(rethrottleRequest.getRequestsPerSecond()); - // we set "group_by" to "none" because this is the response format we can parse back - params.putParam("group_by", "none"); - request.addParameters(params.asMap()); - return request; - 
} - - static Request putScript(PutStoredScriptRequest putStoredScriptRequest) throws IOException { - String endpoint = new EndpointBuilder().addPathPartAsIs("_scripts").addPathPart(putStoredScriptRequest.id()).build(); - Request request = new Request(HttpPost.METHOD_NAME, endpoint); - Params params = new Params(); - params.withTimeout(putStoredScriptRequest.timeout()); - params.withMasterTimeout(putStoredScriptRequest.masterNodeTimeout()); - if (Strings.hasText(putStoredScriptRequest.context())) { - params.putParam("context", putStoredScriptRequest.context()); - } - request.addParameters(params.asMap()); - request.setEntity(createEntity(putStoredScriptRequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - static Request termVectors(TermVectorsRequest tvrequest) throws IOException { - String endpoint; - if (tvrequest.getType() != null) { - endpoint = new EndpointBuilder().addPathPart(tvrequest.getIndex(), tvrequest.getType(), tvrequest.getId()) - .addPathPartAsIs("_termvectors") - .build(); - } else { - endpoint = new EndpointBuilder().addPathPart(tvrequest.getIndex()) - .addPathPartAsIs("_termvectors") - .addPathPart(tvrequest.getId()) - .build(); - } - - Request request = new Request(HttpGet.METHOD_NAME, endpoint); - Params params = new Params(); - params.withRouting(tvrequest.getRouting()); - params.withPreference(tvrequest.getPreference()); - params.withRealtime(tvrequest.getRealtime()); - request.addParameters(params.asMap()); - request.setEntity(createEntity(tvrequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - static Request mtermVectors(MultiTermVectorsRequest mtvrequest) throws IOException { - String endpoint = "_mtermvectors"; - Request request = new Request(HttpGet.METHOD_NAME, endpoint); - request.setEntity(createEntity(mtvrequest, REQUEST_BODY_CONTENT_TYPE)); - return request; - } - - static Request getScript(GetStoredScriptRequest getStoredScriptRequest) { - String endpoint = new EndpointBuilder().addPathPartAsIs("_scripts").addPathPart(getStoredScriptRequest.id()).build(); - Request request = new Request(HttpGet.METHOD_NAME, endpoint); - Params params = new Params(); - params.withMasterTimeout(getStoredScriptRequest.masterNodeTimeout()); - request.addParameters(params.asMap()); - return request; - } - - static Request deleteScript(DeleteStoredScriptRequest deleteStoredScriptRequest) { - String endpoint = new EndpointBuilder().addPathPartAsIs("_scripts").addPathPart(deleteStoredScriptRequest.id()).build(); - Request request = new Request(HttpDelete.METHOD_NAME, endpoint); - Params params = new Params(); - params.withTimeout(deleteStoredScriptRequest.timeout()); - params.withMasterTimeout(deleteStoredScriptRequest.masterNodeTimeout()); - request.addParameters(params.asMap()); - return request; - } - static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException { return createEntity(toXContent, xContentType, ToXContent.EMPTY_PARAMS); } @@ -783,10 +298,6 @@ static String endpoint(String index, String type, String id, String endpoint) { return new EndpointBuilder().addPathPart(index, type, id).addPathPartAsIs(endpoint).build(); } - static String endpoint(String[] indices) { - return new EndpointBuilder().addCommaSeparatedPathParts(indices).build(); - } - static String endpoint(String[] indices, String endpoint) { return new EndpointBuilder().addCommaSeparatedPathParts(indices).addPathPartAsIs(endpoint).build(); } @@ -799,13 +310,6 @@ static String endpoint(String[] indices, String[] types, String endpoint) { .build(); } - 
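Roughly what the EndpointBuilder used by these converters is for, shown as a standalone sketch rather than the converter's actual implementation: each path part is URL-encoded before the parts are joined into an endpoint string, so ids with special characters stay safe in the path.

import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.StringJoiner;

class EndpointSketch {
    static String endpoint(String... parts) {
        StringJoiner joiner = new StringJoiner("/", "/", "");
        for (String part : parts) {
            // encode each part and keep spaces as %20 so they survive in the path
            joiner.add(URLEncoder.encode(part, StandardCharsets.UTF_8).replace("+", "%20"));
        }
        return joiner.toString();
    }

    public static void main(String[] args) {
        System.out.println(endpoint("_scripts", "my script"));  // prints /_scripts/my%20script
    }
}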
static String endpoint(String[] indices, String endpoint, String[] suffixes) { - return new EndpointBuilder().addCommaSeparatedPathParts(indices) - .addPathPartAsIs(endpoint) - .addCommaSeparatedPathParts(suffixes) - .build(); - } - @Deprecated static String endpoint(String[] indices, String endpoint, String type) { return new EndpointBuilder().addCommaSeparatedPathParts(indices).addPathPartAsIs(endpoint).addPathPart(type).build(); @@ -849,39 +353,6 @@ Map asMap() { return parameters; } - Params withDocAsUpsert(boolean docAsUpsert) { - if (docAsUpsert) { - return putParam("doc_as_upsert", Boolean.TRUE.toString()); - } - return this; - } - - Params withFetchSourceContext(FetchSourceContext fetchSourceContext) { - if (fetchSourceContext != null) { - if (fetchSourceContext.fetchSource() == false) { - putParam("_source", Boolean.FALSE.toString()); - } - if (CollectionUtils.isEmpty(fetchSourceContext.includes()) == false) { - putParam("_source_includes", String.join(",", fetchSourceContext.includes())); - } - if (CollectionUtils.isEmpty(fetchSourceContext.excludes()) == false) { - putParam("_source_excludes", String.join(",", fetchSourceContext.excludes())); - } - } - return this; - } - - Params withFields(String[] fields) { - if (CollectionUtils.isEmpty(fields) == false) { - return putParam("fields", String.join(",", fields)); - } - return this; - } - - Params withMasterTimeout(TimeValue masterTimeout) { - return putParam("master_timeout", masterTimeout); - } - Params withPipeline(String pipeline) { return putParam("pipeline", pipeline); } @@ -910,20 +381,6 @@ Params withAllowPartialResults(boolean allowPartialSearchResults) { return putParam("allow_partial_search_results", Boolean.toString(allowPartialSearchResults)); } - Params withRealtime(boolean realtime) { - if (realtime == false) { - return putParam("realtime", Boolean.FALSE.toString()); - } - return this; - } - - Params withRefresh(boolean refresh) { - if (refresh) { - return withRefreshPolicy(RefreshPolicy.IMMEDIATE); - } - return this; - } - Params withRefreshPolicy(RefreshPolicy refreshPolicy) { if (refreshPolicy != RefreshPolicy.NONE) { return putParam("refresh", refreshPolicy.getValue()); @@ -931,46 +388,10 @@ Params withRefreshPolicy(RefreshPolicy refreshPolicy) { return this; } - Params withRequestsPerSecond(float requestsPerSecond) { - // the default in AbstractBulkByScrollRequest is Float.POSITIVE_INFINITY, - // but we don't want to add that to the URL parameters, instead we use -1 - if (Float.isFinite(requestsPerSecond)) { - return putParam(RethrottleRequest.REQUEST_PER_SECOND_PARAMETER, Float.toString(requestsPerSecond)); - } else { - return putParam(RethrottleRequest.REQUEST_PER_SECOND_PARAMETER, "-1"); - } - } - - Params withRetryOnConflict(int retryOnConflict) { - if (retryOnConflict > 0) { - return putParam("retry_on_conflict", String.valueOf(retryOnConflict)); - } - return this; - } - Params withRouting(String routing) { return putParam("routing", routing); } - Params withSlices(int slices) { - if (slices == 0) { - // translate to "auto" value in rest request so the receiving end doesn't throw error - return putParam("slices", AbstractBulkByScrollRequest.AUTO_SLICES_VALUE); - } - return putParam("slices", String.valueOf(slices)); - } - - Params withStoredFields(String[] storedFields) { - if (CollectionUtils.isEmpty(storedFields) == false) { - return putParam("stored_fields", String.join(",", storedFields)); - } - return this; - } - - Params withTerminateAfter(int terminateAfter) { - return putParam("terminate_after", 
String.valueOf(terminateAfter)); - } - Params withTimeout(TimeValue timeout) { return putParam("timeout", timeout); } @@ -1049,103 +470,6 @@ Params withIgnoreUnavailable(boolean ignoreUnavailable) { putParam("ignore_unavailable", Boolean.toString(ignoreUnavailable)); return this; } - - Params withHuman(boolean human) { - if (human) { - putParam("human", Boolean.toString(human)); - } - return this; - } - - Params withLocal(boolean local) { - if (local) { - putParam("local", Boolean.toString(local)); - } - return this; - } - - Params withIncludeDefaults(boolean includeDefaults) { - if (includeDefaults) { - return putParam("include_defaults", Boolean.TRUE.toString()); - } - return this; - } - - Params withPreserveExisting(boolean preserveExisting) { - if (preserveExisting) { - return putParam("preserve_existing", Boolean.TRUE.toString()); - } - return this; - } - - Params withDetailed(boolean detailed) { - if (detailed) { - return putParam("detailed", Boolean.TRUE.toString()); - } - return this; - } - - Params withWaitForCompletion(Boolean waitForCompletion) { - return putParam("wait_for_completion", waitForCompletion.toString()); - } - - Params withNodes(String[] nodes) { - return withNodes(Arrays.asList(nodes)); - } - - Params withNodes(List nodes) { - if (nodes != null && nodes.size() > 0) { - return putParam("nodes", String.join(",", nodes)); - } - return this; - } - - Params withActions(String[] actions) { - return withActions(Arrays.asList(actions)); - } - - Params withActions(List actions) { - if (actions != null && actions.size() > 0) { - return putParam("actions", String.join(",", actions)); - } - return this; - } - - Params withWaitForStatus(ClusterHealthStatus status) { - if (status != null) { - return putParam("wait_for_status", status.name().toLowerCase(Locale.ROOT)); - } - return this; - } - - Params withWaitForNoRelocatingShards(boolean waitNoRelocatingShards) { - if (waitNoRelocatingShards) { - return putParam("wait_for_no_relocating_shards", Boolean.TRUE.toString()); - } - return this; - } - - Params withWaitForNoInitializingShards(boolean waitNoInitShards) { - if (waitNoInitShards) { - return putParam("wait_for_no_initializing_shards", Boolean.TRUE.toString()); - } - return this; - } - - Params withWaitForNodes(String waitForNodes) { - return putParam("wait_for_nodes", waitForNodes); - } - - Params withLevel(ClusterHealthRequest.Level level) { - return putParam("level", level.name().toLowerCase(Locale.ROOT)); - } - - Params withWaitForEvents(Priority waitForEvents) { - if (waitForEvents != null) { - return putParam("wait_for_events", waitForEvents.name().toLowerCase(Locale.ROOT)); - } - return this; - } } /** @@ -1199,11 +523,6 @@ EndpointBuilder addCommaSeparatedPathParts(String[] parts) { return this; } - EndpointBuilder addCommaSeparatedPathParts(List parts) { - addPathPart(String.join(",", parts)); - return this; - } - EndpointBuilder addPathPartAsIs(String... 
parts) { for (String part : parts) { if (Strings.hasLength(part)) { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 1ccdb1ae58df..9b8e92b65981 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -19,25 +19,11 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.explain.ExplainRequest; -import org.elasticsearch.action.explain.ExplainResponse; -import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; -import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; -import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.get.MultiGetRequest; -import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.MultiSearchRequest; -import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; -import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.aggregations.bucket.adjacency.AdjacencyMatrixAggregationBuilder; import org.elasticsearch.aggregations.bucket.adjacency.ParsedAdjacencyMatrix; import org.elasticsearch.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; @@ -49,23 +35,12 @@ import org.elasticsearch.client.analytics.ParsedTopMetrics; import org.elasticsearch.client.analytics.StringStatsAggregationBuilder; import org.elasticsearch.client.analytics.TopMetricsAggregationBuilder; -import org.elasticsearch.client.core.CountRequest; -import org.elasticsearch.client.core.CountResponse; -import org.elasticsearch.client.core.GetSourceRequest; -import org.elasticsearch.client.core.GetSourceResponse; -import org.elasticsearch.client.core.MainRequest; import org.elasticsearch.client.core.MainResponse; -import org.elasticsearch.client.core.TermVectorsRequest; -import org.elasticsearch.client.core.TermVectorsResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedFunction; -import org.elasticsearch.index.reindex.BulkByScrollResponse; -import org.elasticsearch.index.reindex.DeleteByQueryRequest; -import org.elasticsearch.index.reindex.ReindexRequest; -import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.plugins.spi.NamedXContentProvider; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; @@ -198,7 +173,6 @@ import java.util.stream.Stream; import static java.util.Collections.emptySet; -import static java.util.Collections.singleton; import static java.util.stream.Collectors.toList; /** @@ -360,223 +334,6 @@ public 
final Cancellable bulkAsync(BulkRequest bulkRequest, RequestOptions optio ); } - /** - * Executes a reindex request. - * See Reindex API on elastic.co - * @param reindexRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public final BulkByScrollResponse reindex(ReindexRequest reindexRequest, RequestOptions options) throws IOException { - return performRequestAndParseEntity( - reindexRequest, - RequestConverters::reindex, - options, - BulkByScrollResponse::fromXContent, - singleton(409) - ); - } - - /** - * Executes a update by query request. - * See - * Update By Query API on elastic.co - * @param updateByQueryRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public final BulkByScrollResponse updateByQuery(UpdateByQueryRequest updateByQueryRequest, RequestOptions options) throws IOException { - return performRequestAndParseEntity( - updateByQueryRequest, - RequestConverters::updateByQuery, - options, - BulkByScrollResponse::fromXContent, - singleton(409) - ); - } - - /** - * Executes a delete by query request. - * See - * Delete By Query API on elastic.co - * @param deleteByQueryRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public final BulkByScrollResponse deleteByQuery(DeleteByQueryRequest deleteByQueryRequest, RequestOptions options) throws IOException { - return performRequestAndParseEntity( - deleteByQueryRequest, - RequestConverters::deleteByQuery, - options, - BulkByScrollResponse::fromXContent, - singleton(409) - ); - } - - /** - * Get the cluster info otherwise provided when sending an HTTP request to '/' - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public final MainResponse info(RequestOptions options) throws IOException { - return performRequestAndParseEntity( - new MainRequest(), - (request) -> RequestConverters.info(), - options, - MainResponse::fromXContent, - emptySet() - ); - } - - /** - * Retrieves a document by id using the Get API. - * See Get API on elastic.co - * @param getRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public final GetResponse get(GetRequest getRequest, RequestOptions options) throws IOException { - return performRequestAndParseEntity(getRequest, RequestConverters::get, options, GetResponse::fromXContent, singleton(404)); - } - - /** - * Retrieves multiple documents by id using the Multi Get API. - * See Multi Get API on elastic.co - * @param multiGetRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - * @deprecated use {@link #mget(MultiGetRequest, RequestOptions)} instead - */ - @Deprecated - public final MultiGetResponse multiGet(MultiGetRequest multiGetRequest, RequestOptions options) throws IOException { - return mget(multiGetRequest, options); - } - - /** - * Retrieves multiple documents by id using the Multi Get API. 
- * See Multi Get API on elastic.co - * @param multiGetRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public final MultiGetResponse mget(MultiGetRequest multiGetRequest, RequestOptions options) throws IOException { - return performRequestAndParseEntity( - multiGetRequest, - RequestConverters::multiGet, - options, - MultiGetResponse::fromXContent, - singleton(404) - ); - } - - /** - * Asynchronously retrieves multiple documents by id using the Multi Get API. - * See Multi Get API on elastic.co - * @param multiGetRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @param listener the listener to be notified upon request completion - * @deprecated use {@link #mgetAsync(MultiGetRequest, RequestOptions, ActionListener)} instead - * @return cancellable that may be used to cancel the request - */ - @Deprecated - public final Cancellable multiGetAsync( - MultiGetRequest multiGetRequest, - RequestOptions options, - ActionListener listener - ) { - return mgetAsync(multiGetRequest, options, listener); - } - - /** - * Asynchronously retrieves multiple documents by id using the Multi Get API. - * See Multi Get API on elastic.co - * @param multiGetRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @param listener the listener to be notified upon request completion - * @return cancellable that may be used to cancel the request - */ - public final Cancellable mgetAsync(MultiGetRequest multiGetRequest, RequestOptions options, ActionListener listener) { - return performRequestAsyncAndParseEntity( - multiGetRequest, - RequestConverters::multiGet, - options, - MultiGetResponse::fromXContent, - listener, - singleton(404) - ); - } - - /** - * Checks for the existence of a document. Returns true if it exists, false otherwise. - * See Get API on elastic.co - * @param getRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return true if the document exists, false otherwise - */ - public final boolean exists(GetRequest getRequest, RequestOptions options) throws IOException { - return performRequest(getRequest, RequestConverters::exists, options, RestHighLevelClient::convertExistsResponse, emptySet()); - } - - /** - * Checks for the existence of a document with a "_source" field. Returns true if it exists, false otherwise. - * See Source exists API - * on elastic.co - * @param getRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return true if the document and _source field exists, false otherwise - */ - @Deprecated - public boolean existsSource(GetRequest getRequest, RequestOptions options) throws IOException { - GetSourceRequest getSourceRequest = GetSourceRequest.from(getRequest); - return performRequest( - getSourceRequest, - RequestConverters::sourceExists, - options, - RestHighLevelClient::convertExistsResponse, - emptySet() - ); - } - - /** - * Asynchronously checks for the existence of a document with a "_source" field. Returns true if it exists, false otherwise. - * See Source exists API - * on elastic.co - * @param getRequest the request - * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @param listener the listener to be notified upon request completion - * @return cancellable that may be used to cancel the request - */ - @Deprecated - public final Cancellable existsSourceAsync(GetRequest getRequest, RequestOptions options, ActionListener listener) { - GetSourceRequest getSourceRequest = GetSourceRequest.from(getRequest); - return performRequestAsync( - getSourceRequest, - RequestConverters::sourceExists, - options, - RestHighLevelClient::convertExistsResponse, - listener, - emptySet() - ); - } - - /** - * Retrieves the source field only of a document using GetSource API. - * See Get Source API - * on elastic.co - * @param getSourceRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public GetSourceResponse getSource(GetSourceRequest getSourceRequest, RequestOptions options) throws IOException { - return performRequestAndParseEntity( - getSourceRequest, - RequestConverters::getSource, - options, - GetSourceResponse::fromXContent, - emptySet() - ); - } - /** * Index a document using the Index API. * See Index API on elastic.co @@ -588,45 +345,6 @@ public final IndexResponse index(IndexRequest indexRequest, RequestOptions optio return performRequestAndParseEntity(indexRequest, RequestConverters::index, options, IndexResponse::fromXContent, emptySet()); } - /** - * Executes a count request using the Count API. - * See Count API on elastic.co - * @param countRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public final CountResponse count(CountRequest countRequest, RequestOptions options) throws IOException { - return performRequestAndParseEntity(countRequest, RequestConverters::count, options, CountResponse::fromXContent, emptySet()); - } - - /** - * Updates a document using the Update API. - * See Update API on elastic.co - * @param updateRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public final UpdateResponse update(UpdateRequest updateRequest, RequestOptions options) throws IOException { - return performRequestAndParseEntity(updateRequest, RequestConverters::update, options, UpdateResponse::fromXContent, emptySet()); - } - - /** - * Deletes a document by id using the Delete API. - * See Delete API on elastic.co - * @param deleteRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public final DeleteResponse delete(DeleteRequest deleteRequest, RequestOptions options) throws IOException { - return performRequestAndParseEntity( - deleteRequest, - RequestConverters::delete, - options, - DeleteResponse::fromXContent, - singleton(404) - ); - } - /** * Executes a search request using the Search API. * See Search API on elastic.co @@ -663,96 +381,6 @@ public final Cancellable searchAsync(SearchRequest searchRequest, RequestOptions ); } - /** - * Executes a multi search using the msearch API. - * See Multi search API on - * elastic.co - * @param multiSearchRequest the request - * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - * @deprecated use {@link #msearch(MultiSearchRequest, RequestOptions)} instead - */ - @Deprecated - public final MultiSearchResponse multiSearch(MultiSearchRequest multiSearchRequest, RequestOptions options) throws IOException { - return msearch(multiSearchRequest, options); - } - - /** - * Executes a multi search using the msearch API. - * See Multi search API on - * elastic.co - * @param multiSearchRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public final MultiSearchResponse msearch(MultiSearchRequest multiSearchRequest, RequestOptions options) throws IOException { - return performRequestAndParseEntity( - multiSearchRequest, - RequestConverters::multiSearch, - options, - MultiSearchResponse::fromXContext, - emptySet() - ); - } - - /** - * Asynchronously executes a multi search using the msearch API. - * See Multi search API on - * elastic.co - * @param searchRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @param listener the listener to be notified upon request completion - * @deprecated use {@link #msearchAsync(MultiSearchRequest, RequestOptions, ActionListener)} instead - * @return cancellable that may be used to cancel the request - */ - @Deprecated - public final Cancellable multiSearchAsync( - MultiSearchRequest searchRequest, - RequestOptions options, - ActionListener listener - ) { - return msearchAsync(searchRequest, options, listener); - } - - /** - * Asynchronously executes a multi search using the msearch API. - * See Multi search API on - * elastic.co - * @param searchRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @param listener the listener to be notified upon request completion - * @return cancellable that may be used to cancel the request - */ - public final Cancellable msearchAsync( - MultiSearchRequest searchRequest, - RequestOptions options, - ActionListener listener - ) { - return performRequestAsyncAndParseEntity( - searchRequest, - RequestConverters::multiSearch, - options, - MultiSearchResponse::fromXContext, - listener, - emptySet() - ); - } - - /** - * Executes a search using the Search Scroll API. - * See Search - * Scroll API on elastic.co - * @param searchScrollRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - * @deprecated use {@link #scroll(SearchScrollRequest, RequestOptions)} instead - */ - @Deprecated - public final SearchResponse searchScroll(SearchScrollRequest searchScrollRequest, RequestOptions options) throws IOException { - return scroll(searchScrollRequest, options); - } - /** * Executes a search using the Search Scroll API. * See Search - * Scroll API on elastic.co - * @param searchScrollRequest the request - * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @param listener the listener to be notified upon request completion - * @deprecated use {@link #scrollAsync(SearchScrollRequest, RequestOptions, ActionListener)} instead - * @return cancellable that may be used to cancel the request - */ - @Deprecated - public final Cancellable searchScrollAsync( - SearchScrollRequest searchScrollRequest, - RequestOptions options, - ActionListener listener - ) { - return scrollAsync(searchScrollRequest, options, listener); - } - - /** - * Asynchronously executes a search using the Search Scroll API. - * See Search - * Scroll API on elastic.co - * @param searchScrollRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @param listener the listener to be notified upon request completion - * @return cancellable that may be used to cancel the request - */ - public final Cancellable scrollAsync( - SearchScrollRequest searchScrollRequest, - RequestOptions options, - ActionListener listener - ) { - return performRequestAsyncAndParseEntity( - searchScrollRequest, - RequestConverters::searchScroll, - options, - SearchResponse::fromXContent, - listener, - emptySet() - ); - } - - /** - * Executes a request using the Explain API. - * See Explain API on elastic.co - * @param explainRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public final ExplainResponse explain(ExplainRequest explainRequest, RequestOptions options) throws IOException { - return performRequest(explainRequest, RequestConverters::explain, options, response -> { - CheckedFunction entityParser = parser -> ExplainResponse.fromXContent( - parser, - convertExistsResponse(response) - ); - return parseEntity(response.getEntity(), entityParser); - }, singleton(404)); - } - - /** - * Calls the Term Vectors API - * - * See Term Vectors API on - * elastic.co - * - * @param request the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - */ - public final TermVectorsResponse termvectors(TermVectorsRequest request, RequestOptions options) throws IOException { - return performRequestAndParseEntity( - request, - RequestConverters::termVectors, - options, - TermVectorsResponse::fromXContent, - emptySet() - ); - } - - /** - * Executes a request using the Field Capabilities API. - * See Field Capabilities API - * on elastic.co. - * @param fieldCapabilitiesRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - */ - public final FieldCapabilitiesResponse fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest, RequestOptions options) - throws IOException { - return performRequestAndParseEntity( - fieldCapabilitiesRequest, - RequestConverters::fieldCaps, - options, - FieldCapabilitiesResponse::fromXContent, - emptySet() - ); - } - /** * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. 
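(Editor's note, not part of the patch.) The synchronous helpers removed in the hunks above (explain, termvectors, fieldCaps and the search-scroll variants) were thin wrappers that callers invoked directly on the client. For orientation, a minimal caller-side sketch of two of them as they looked before this removal; it is an illustration only, assuming an already-configured RestHighLevelClient, and the class name, index, id and field names are placeholders:

    import java.io.IOException;
    import org.elasticsearch.action.explain.ExplainRequest;
    import org.elasticsearch.action.explain.ExplainResponse;
    import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
    import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.RestHighLevelClient;
    import org.elasticsearch.index.query.QueryBuilders;

    class RemovedSyncHelpersExample {
        // Sketch only: "client" is assumed to be an already-configured RestHighLevelClient;
        // "my-index", "1" and "message" are placeholder values, not part of this change.
        static void demo(RestHighLevelClient client) throws IOException {
            // Explain why (or why not) a document matches a query.
            ExplainRequest explainRequest = new ExplainRequest("my-index", "1");
            explainRequest.query(QueryBuilders.matchQuery("message", "elasticsearch"));
            ExplainResponse explanation = client.explain(explainRequest, RequestOptions.DEFAULT);

            // Retrieve the capabilities of a field across the target indices.
            FieldCapabilitiesRequest fieldCapsRequest = new FieldCapabilitiesRequest()
                .indices("my-index")
                .fields("message");
            FieldCapabilitiesResponse fieldCaps = client.fieldCaps(fieldCapsRequest, RequestOptions.DEFAULT);
        }
    }

Both calls follow the pattern visible in the deleted bodies: the request is converted by RequestConverters, executed, and the HTTP entity is parsed with the response type's fromXContent method.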
@@ -887,19 +415,6 @@ protected final Resp performRequestAndParseEnt return performRequest(request, requestConverter, options, response -> parseEntity(response.getEntity(), entityParser), ignores); } - /** - * Defines a helper method for performing a request and then parsing the returned entity using the provided entityParser. - */ - protected final Resp performRequestAndParseEntity( - Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction entityParser, - Set ignores - ) throws IOException { - return performRequest(request, requestConverter, options, response -> parseEntity(response.getEntity(), entityParser), ignores); - } - /** * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. @@ -1019,26 +534,6 @@ protected final Cancellable performRequestAsyn return internalPerformRequestAsync(request, requestConverter, options, responseConverter, listener, ignores); } - /** - * Defines a helper method for asynchronously performing a request. - * @return Cancellable instance that may be used to cancel the request - */ - protected final Cancellable performRequestAsync( - Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - ActionListener listener, - Set ignores - ) { - Optional validationException = request.validate(); - if (validationException != null && validationException.isPresent()) { - listener.onFailure(validationException.get()); - return Cancellable.NO_OP; - } - return internalPerformRequestAsync(request, requestConverter, options, responseConverter, listener, ignores); - } - /** * Provides common functionality for asynchronously performing a request. * @return Cancellable instance that may be used to cancel the request @@ -1104,37 +599,6 @@ public void onFailure(Exception exception) { }; } - final ResponseListener wrapResponseListener404sOptional( - CheckedFunction responseConverter, - ActionListener> actionListener - ) { - return new ResponseListener() { - @Override - public void onSuccess(Response response) { - try { - actionListener.onResponse(Optional.of(responseConverter.apply(response))); - } catch (Exception e) { - IOException ioe = new IOException("Unable to parse response body for " + response, e); - onFailure(ioe); - } - } - - @Override - public void onFailure(Exception exception) { - if (exception instanceof ResponseException responseException) { - Response response = responseException.getResponse(); - if (RestStatus.NOT_FOUND.getStatus() == response.getStatusLine().getStatusCode()) { - actionListener.onResponse(Optional.empty()); - } else { - actionListener.onFailure(parseResponseException(responseException)); - } - } else { - actionListener.onFailure(exception); - } - } - }; - } - /** * Converts a {@link ResponseException} obtained from the low level REST client into an {@link ElasticsearchException}. * If a response body was returned, tries to parse it as an error returned from Elasticsearch. 
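(Editor's note, not part of the patch.) The protected overloads removed in the hunk above (performRequestAndParseEntity, performRequestAsync and the 404-to-Optional listener wrapper) are the plumbing behind the asynchronous, Cancellable-returning helpers deleted earlier in this file. A minimal caller-side sketch of that asynchronous pattern as it looked before this change; the class name, client variable and scroll id are placeholders introduced here for illustration:

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.action.search.SearchScrollRequest;
    import org.elasticsearch.client.Cancellable;
    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.RestHighLevelClient;
    import org.elasticsearch.core.TimeValue;

    class RemovedAsyncPatternExample {
        // Sketch only: "client" is assumed to be an already-configured RestHighLevelClient
        // and the scroll id is a placeholder value.
        static Cancellable demo(RestHighLevelClient client) {
            SearchScrollRequest scrollRequest = new SearchScrollRequest("placeholder-scroll-id");
            scrollRequest.scroll(TimeValue.timeValueMinutes(1));
            return client.scrollAsync(scrollRequest, RequestOptions.DEFAULT, new ActionListener<SearchResponse>() {
                @Override
                public void onResponse(SearchResponse response) {
                    // the next page of hits arrives here
                }

                @Override
                public void onFailure(Exception e) {
                    // handle the failure
                }
            });
        }
    }

The listener is invoked by the response-wrapping code shown in the surrounding hunks; returning the Cancellable lets the caller abort the in-flight request.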
@@ -1179,10 +643,6 @@ protected final Resp parseEntity(final HttpEntity entity, final CheckedFu } } - protected static boolean convertExistsResponse(Response response) { - return response.getStatusLine().getStatusCode() == 200; - } - private enum EntityType { JSON() { @Override diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RethrottleRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RethrottleRequest.java deleted file mode 100644 index a518d74b14c2..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RethrottleRequest.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.client; - -import org.elasticsearch.tasks.TaskId; - -import java.util.Objects; - -/** - * A request changing throttling of a task. - */ -public class RethrottleRequest implements Validatable { - - static final String REQUEST_PER_SECOND_PARAMETER = "requests_per_second"; - - private final TaskId taskId; - private final float requestsPerSecond; - - /** - * Create a new {@link RethrottleRequest} which disables any throttling for the given taskId. - * @param taskId the task for which throttling will be disabled - */ - public RethrottleRequest(TaskId taskId) { - this.taskId = taskId; - this.requestsPerSecond = Float.POSITIVE_INFINITY; - } - - /** - * Create a new {@link RethrottleRequest} which changes the throttling for the given taskId. - * @param taskId the task that throttling changes will be applied to - * @param requestsPerSecond the number of requests per second that the task should perform. This needs to be a positive value. - */ - public RethrottleRequest(TaskId taskId, float requestsPerSecond) { - Objects.requireNonNull(taskId, "taskId cannot be null"); - if (requestsPerSecond <= 0) { - throw new IllegalArgumentException("requestsPerSecond needs to be positive value but was [" + requestsPerSecond + "]"); - } - this.taskId = taskId; - this.requestsPerSecond = requestsPerSecond; - } - - /** - * @return the task Id - */ - public TaskId getTaskId() { - return taskId; - } - - /** - * @return the requests per seconds value - */ - public float getRequestsPerSecond() { - return requestsPerSecond; - } - - @Override - public String toString() { - return "RethrottleRequest: taskID = " + taskId + "; reqestsPerSecond = " + requestsPerSecond; - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java deleted file mode 100644 index a3135b9725eb..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.client; - -import org.elasticsearch.core.TimeValue; - -import static org.elasticsearch.core.TimeValue.timeValueSeconds; - -/** - * A base request for any requests that supply timeouts. - * - * Please note, any requests that use a ackTimeout should set timeout as they - * represent the same backing field on the server. - */ -public abstract class TimedRequest implements Validatable { - - public static final TimeValue DEFAULT_ACK_TIMEOUT = timeValueSeconds(30); - public static final TimeValue DEFAULT_MASTER_NODE_TIMEOUT = TimeValue.timeValueSeconds(30); - - private TimeValue timeout = DEFAULT_ACK_TIMEOUT; - private TimeValue masterTimeout = DEFAULT_MASTER_NODE_TIMEOUT; - - /** - * Sets the timeout to wait for the all the nodes to acknowledge - * @param timeout timeout as a {@link TimeValue} - */ - public void setTimeout(TimeValue timeout) { - this.timeout = timeout; - } - - /** - * Sets the timeout to connect to the master node - * @param masterTimeout timeout as a {@link TimeValue} - */ - public void setMasterTimeout(TimeValue masterTimeout) { - this.masterTimeout = masterTimeout; - } - - /** - * Returns the request timeout - */ - public TimeValue timeout() { - return timeout; - } - - /** - * Returns the timeout for the request to be completed on the master node - */ - public TimeValue masterNodeTimeout() { - return masterTimeout; - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java index e1dc45e65086..981e6de9c941 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java @@ -8,7 +8,7 @@ package org.elasticsearch.client.analytics; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.query.QueryRewriteContext; @@ -124,7 +124,7 @@ public boolean equals(Object obj) { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_EMPTY; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.ZERO; } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/TopMetricsAggregationBuilder.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/TopMetricsAggregationBuilder.java index eb3d9cf0452c..5af136f75bb7 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/TopMetricsAggregationBuilder.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/TopMetricsAggregationBuilder.java @@ -8,7 +8,7 @@ package org.elasticsearch.client.analytics; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.query.QueryRewriteContext; @@ -101,7 +101,7 @@ protected AggregationBuilder shallowCopy(Builder factoriesBuilder, Map validate() { - return Optional.empty(); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - GetAsyncSearchRequest request = 
(GetAsyncSearchRequest) o; - return Objects.equals(getId(), request.getId()) - && Objects.equals(getKeepAlive(), request.getKeepAlive()) - && Objects.equals(getWaitForCompletion(), request.getWaitForCompletion()); - } - - @Override - public int hashCode() { - return Objects.hash(getId(), getKeepAlive(), getWaitForCompletion()); - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/asyncsearch/SubmitAsyncSearchRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/asyncsearch/SubmitAsyncSearchRequest.java deleted file mode 100644 index 9c381645d6ff..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/asyncsearch/SubmitAsyncSearchRequest.java +++ /dev/null @@ -1,265 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.client.asyncsearch; - -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Validatable; -import org.elasticsearch.client.ValidationException; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.search.builder.SearchSourceBuilder; - -import java.util.Objects; -import java.util.Optional; - -/** - * A request to track asynchronously the progress of a search against one or more indices. - */ -public class SubmitAsyncSearchRequest implements Validatable { - - public static long MIN_KEEP_ALIVE = TimeValue.timeValueMinutes(1).millis(); - - private TimeValue waitForCompletionTimeout; - private Boolean keepOnCompletion; - private TimeValue keepAlive; - private final SearchRequest searchRequest; - // The following is optional and will only be sent down with the request if explicitely set by the user - private Integer batchedReduceSize; - - /** - * Creates a new request - */ - public SubmitAsyncSearchRequest(SearchSourceBuilder source, String... indices) { - this.searchRequest = new SearchRequest(indices, source); - } - - /** - * Get the target indices - */ - public String[] getIndices() { - return this.searchRequest.indices(); - } - - /** - * Get the minimum time that the request should wait before returning a partial result (defaults to 1 second). - */ - public TimeValue getWaitForCompletionTimeout() { - return waitForCompletionTimeout; - } - - /** - * Sets the minimum time that the request should wait before returning a partial result (defaults to 1 second). - */ - public void setWaitForCompletionTimeout(TimeValue waitForCompletionTimeout) { - this.waitForCompletionTimeout = waitForCompletionTimeout; - } - - /** - * Returns whether the resource resource should be kept on completion or failure (defaults to false). - */ - public Boolean isKeepOnCompletion() { - return keepOnCompletion; - } - - /** - * Determines if the resource should be kept on completion or failure (defaults to false). - */ - public void setKeepOnCompletion(boolean keepOnCompletion) { - this.keepOnCompletion = keepOnCompletion; - } - - /** - * Get the amount of time after which the result will expire (defaults to 5 days). 
- */ - public TimeValue getKeepAlive() { - return keepAlive; - } - - /** - * Sets the amount of time after which the result will expire (defaults to 5 days). - */ - public void setKeepAlive(TimeValue keepAlive) { - this.keepAlive = keepAlive; - } - - // setters for request parameters of the wrapped SearchRequest - /** - * Set the routing value to control the shards that the search will be executed on. - * A comma separated list of routing values to control the shards the search will be executed on. - */ - public void setRouting(String routing) { - this.searchRequest.routing(routing); - } - - /** - * Set the routing values to control the shards that the search will be executed on. - */ - public void setRoutings(String... routings) { - this.searchRequest.routing(routings); - } - - /** - * Get the routing value to control the shards that the search will be executed on. - */ - public String getRouting() { - return this.searchRequest.routing(); - } - - /** - * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to - * {@code _local} to prefer local shards or a custom value, which guarantees that the same order - * will be used across different requests. - */ - public void setPreference(String preference) { - this.searchRequest.preference(preference); - } - - /** - * Get the preference to execute the search. - */ - public String getPreference() { - return this.searchRequest.preference(); - } - - /** - * Specifies what type of requested indices to ignore and how to deal with indices wildcard expressions. - */ - public void setIndicesOptions(IndicesOptions indicesOptions) { - this.searchRequest.indicesOptions(indicesOptions); - } - - /** - * Get the indices Options. - */ - public IndicesOptions getIndicesOptions() { - return this.searchRequest.indicesOptions(); - } - - /** - * The search type to execute, defaults to {@link SearchType#DEFAULT}. - */ - public void setSearchType(SearchType searchType) { - this.searchRequest.searchType(searchType); - } - - /** - * Get the search type to execute, defaults to {@link SearchType#DEFAULT}. - */ - public SearchType getSearchType() { - return this.searchRequest.searchType(); - } - - /** - * Sets if this request should allow partial results. (If method is not called, - * will default to the cluster level setting). - */ - public void setAllowPartialSearchResults(boolean allowPartialSearchResults) { - this.searchRequest.allowPartialSearchResults(allowPartialSearchResults); - } - - /** - * Gets if this request should allow partial results. - */ - public Boolean getAllowPartialSearchResults() { - return this.searchRequest.allowPartialSearchResults(); - } - - /** - * Optional. Sets the number of shard results that should be reduced at once on the coordinating node. - * This value should be used as a protection mechanism to reduce the memory overhead per search - * request if the potential number of shards in the request can be large. Defaults to 5. - */ - public void setBatchedReduceSize(int batchedReduceSize) { - this.batchedReduceSize = batchedReduceSize; - } - - /** - * Gets the number of shard results that should be reduced at once on the coordinating node. - * Returns {@code null} if unset. - */ - public Integer getBatchedReduceSize() { - return this.batchedReduceSize; - } - - /** - * Sets if this request should use the request cache or not, assuming that it can (for - * example, if "now" is used, it will never be cached). - * By default (if not set) this is turned on for {@link SubmitAsyncSearchRequest}. 
- */ - public void setRequestCache(Boolean requestCache) { - this.searchRequest.requestCache(requestCache); - } - - /** - * Gets if this request should use the request cache or not, if set. - * This defaults to `true` on the server side if unset in the client. - */ - public Boolean getRequestCache() { - return this.searchRequest.requestCache(); - } - - /** - * Returns the number of shard requests that should be executed concurrently on a single node. - * The default is {@code 5}. - */ - public int getMaxConcurrentShardRequests() { - return this.searchRequest.getMaxConcurrentShardRequests(); - } - - /** - * Sets the number of shard requests that should be executed concurrently on a single node. - * The default is {@code 5}. - */ - public void setMaxConcurrentShardRequests(int maxConcurrentShardRequests) { - this.searchRequest.setMaxConcurrentShardRequests(maxConcurrentShardRequests); - } - - /** - * Gets if the source of the {@link SearchSourceBuilder} initially used on this request. - */ - public SearchSourceBuilder getSearchSource() { - return this.searchRequest.source(); - } - - @Override - public Optional validate() { - final ValidationException validationException = new ValidationException(); - if (searchRequest.isSuggestOnly()) { - validationException.addValidationError("suggest-only queries are not supported"); - } - if (keepAlive != null && keepAlive.getMillis() < MIN_KEEP_ALIVE) { - validationException.addValidationError("[keep_alive] must be greater than 1 minute, got: " + keepAlive.toString()); - } - if (validationException.validationErrors().isEmpty()) { - return Optional.empty(); - } - return Optional.of(validationException); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - SubmitAsyncSearchRequest request = (SubmitAsyncSearchRequest) o; - return Objects.equals(searchRequest, request.searchRequest) - && Objects.equals(getKeepAlive(), request.getKeepAlive()) - && Objects.equals(getWaitForCompletionTimeout(), request.getWaitForCompletionTimeout()) - && Objects.equals(isKeepOnCompletion(), request.isKeepOnCompletion()); - } - - @Override - public int hashCode() { - return Objects.hash(searchRequest, getKeepAlive(), getWaitForCompletionTimeout(), isKeepOnCompletion()); - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/cluster/ProxyModeInfo.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/cluster/ProxyModeInfo.java deleted file mode 100644 index 0e55c232cf3f..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/cluster/ProxyModeInfo.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.client.cluster; - -import java.util.Objects; - -public class ProxyModeInfo implements RemoteConnectionInfo.ModeInfo { - static final String NAME = "proxy"; - static final String PROXY_ADDRESS = "proxy_address"; - static final String SERVER_NAME = "server_name"; - static final String NUM_PROXY_SOCKETS_CONNECTED = "num_proxy_sockets_connected"; - static final String MAX_PROXY_SOCKET_CONNECTIONS = "max_proxy_socket_connections"; - private final String address; - private final String serverName; - private final int maxSocketConnections; - private final int numSocketsConnected; - - ProxyModeInfo(String address, String serverName, int maxSocketConnections, int numSocketsConnected) { - this.address = address; - this.serverName = serverName; - this.maxSocketConnections = maxSocketConnections; - this.numSocketsConnected = numSocketsConnected; - } - - @Override - public boolean isConnected() { - return numSocketsConnected > 0; - } - - @Override - public String modeName() { - return NAME; - } - - public String getAddress() { - return address; - } - - public String getServerName() { - return serverName; - } - - public int getMaxSocketConnections() { - return maxSocketConnections; - } - - public int getNumSocketsConnected() { - return numSocketsConnected; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ProxyModeInfo otherProxy = (ProxyModeInfo) o; - return maxSocketConnections == otherProxy.maxSocketConnections - && numSocketsConnected == otherProxy.numSocketsConnected - && Objects.equals(address, otherProxy.address) - && Objects.equals(serverName, otherProxy.serverName); - } - - @Override - public int hashCode() { - return Objects.hash(address, serverName, maxSocketConnections, numSocketsConnected); - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/cluster/RemoteConnectionInfo.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/cluster/RemoteConnectionInfo.java deleted file mode 100644 index f5069d7771d7..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/cluster/RemoteConnectionInfo.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.client.cluster; - -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.List; -import java.util.Objects; - -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; - -/** - * This class encapsulates all remote cluster information to be rendered on - * {@code _remote/info} requests. 
- */ -public final class RemoteConnectionInfo { - private static final String CONNECTED = "connected"; - private static final String MODE = "mode"; - private static final String INITIAL_CONNECT_TIMEOUT = "initial_connect_timeout"; - private static final String SKIP_UNAVAILABLE = "skip_unavailable"; - - @SuppressWarnings("unchecked") - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "RemoteConnectionInfoObjectParser", - false, - (args, clusterAlias) -> { - String mode = (String) args[1]; - ModeInfo modeInfo; - if (mode.equals(ProxyModeInfo.NAME)) { - modeInfo = new ProxyModeInfo((String) args[4], (String) args[5], (int) args[6], (int) args[7]); - } else if (mode.equals(SniffModeInfo.NAME)) { - modeInfo = new SniffModeInfo((List) args[8], (int) args[9], (int) args[10]); - } else { - throw new IllegalArgumentException("mode cannot be " + mode); - } - return new RemoteConnectionInfo(clusterAlias, modeInfo, (String) args[2], (boolean) args[3]); - } - ); - - static { - PARSER.declareBoolean(constructorArg(), new ParseField(CONNECTED)); - PARSER.declareString(constructorArg(), new ParseField(MODE)); - PARSER.declareString(constructorArg(), new ParseField(INITIAL_CONNECT_TIMEOUT)); - PARSER.declareBoolean(constructorArg(), new ParseField(SKIP_UNAVAILABLE)); - - PARSER.declareString(optionalConstructorArg(), new ParseField(ProxyModeInfo.PROXY_ADDRESS)); - PARSER.declareString(optionalConstructorArg(), new ParseField(ProxyModeInfo.SERVER_NAME)); - PARSER.declareInt(optionalConstructorArg(), new ParseField(ProxyModeInfo.MAX_PROXY_SOCKET_CONNECTIONS)); - PARSER.declareInt(optionalConstructorArg(), new ParseField(ProxyModeInfo.NUM_PROXY_SOCKETS_CONNECTED)); - - PARSER.declareStringArray(optionalConstructorArg(), new ParseField(SniffModeInfo.SEEDS)); - PARSER.declareInt(optionalConstructorArg(), new ParseField(SniffModeInfo.MAX_CONNECTIONS_PER_CLUSTER)); - PARSER.declareInt(optionalConstructorArg(), new ParseField(SniffModeInfo.NUM_NODES_CONNECTED)); - } - - private final ModeInfo modeInfo; - // TODO: deprecate and remove this field in favor of initialConnectionTimeout field that is of type TimeValue. 
- // When rest api versioning exists then change org.elasticsearch.transport.RemoteConnectionInfo to properly serialize - // the initialConnectionTimeout field so that we can properly parse initialConnectionTimeout as TimeValue - private final String initialConnectionTimeoutString; - private final String clusterAlias; - private final boolean skipUnavailable; - - RemoteConnectionInfo(String clusterAlias, ModeInfo modeInfo, String initialConnectionTimeoutString, boolean skipUnavailable) { - this.clusterAlias = clusterAlias; - this.modeInfo = modeInfo; - this.initialConnectionTimeoutString = initialConnectionTimeoutString; - this.skipUnavailable = skipUnavailable; - } - - public boolean isConnected() { - return modeInfo.isConnected(); - } - - public String getClusterAlias() { - return clusterAlias; - } - - public ModeInfo getModeInfo() { - return modeInfo; - } - - public String getInitialConnectionTimeoutString() { - return initialConnectionTimeoutString; - } - - public boolean isSkipUnavailable() { - return skipUnavailable; - } - - public static RemoteConnectionInfo fromXContent(XContentParser parser, String clusterAlias) throws IOException { - return PARSER.parse(parser, clusterAlias); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - RemoteConnectionInfo that = (RemoteConnectionInfo) o; - return skipUnavailable == that.skipUnavailable - && Objects.equals(modeInfo, that.modeInfo) - && Objects.equals(initialConnectionTimeoutString, that.initialConnectionTimeoutString) - && Objects.equals(clusterAlias, that.clusterAlias); - } - - @Override - public int hashCode() { - return Objects.hash(modeInfo, initialConnectionTimeoutString, clusterAlias, skipUnavailable); - } - - public interface ModeInfo { - - boolean isConnected(); - - String modeName(); - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/cluster/RemoteInfoRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/cluster/RemoteInfoRequest.java deleted file mode 100644 index 7672c1054c04..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/cluster/RemoteInfoRequest.java +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.client.cluster; - -import org.elasticsearch.client.Validatable; - -/** - * The request object used by the Remote cluster info API. - */ -public final class RemoteInfoRequest implements Validatable { - -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/cluster/RemoteInfoResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/cluster/RemoteInfoResponse.java deleted file mode 100644 index 9aff43bf3871..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/cluster/RemoteInfoResponse.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.client.cluster; - -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; - -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; - -/** - * A response to _remote/info API request. - */ -public final class RemoteInfoResponse { - - private List infos; - - RemoteInfoResponse(Collection infos) { - this.infos = List.copyOf(infos); - } - - public List getInfos() { - return infos; - } - - public static RemoteInfoResponse fromXContent(XContentParser parser) throws IOException { - ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); - - List infos = new ArrayList<>(); - - XContentParser.Token token; - while ((token = parser.nextToken()) == XContentParser.Token.FIELD_NAME) { - String clusterAlias = parser.currentName(); - RemoteConnectionInfo info = RemoteConnectionInfo.fromXContent(parser, clusterAlias); - infos.add(info); - } - ensureExpectedToken(XContentParser.Token.END_OBJECT, token, parser); - return new RemoteInfoResponse(infos); - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/cluster/SniffModeInfo.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/cluster/SniffModeInfo.java deleted file mode 100644 index e08509dd14b6..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/cluster/SniffModeInfo.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.client.cluster; - -import java.util.List; -import java.util.Objects; - -public class SniffModeInfo implements RemoteConnectionInfo.ModeInfo { - public static final String NAME = "sniff"; - static final String SEEDS = "seeds"; - static final String NUM_NODES_CONNECTED = "num_nodes_connected"; - static final String MAX_CONNECTIONS_PER_CLUSTER = "max_connections_per_cluster"; - final List seedNodes; - final int maxConnectionsPerCluster; - final int numNodesConnected; - - SniffModeInfo(List seedNodes, int maxConnectionsPerCluster, int numNodesConnected) { - this.seedNodes = seedNodes; - this.maxConnectionsPerCluster = maxConnectionsPerCluster; - this.numNodesConnected = numNodesConnected; - } - - @Override - public boolean isConnected() { - return numNodesConnected > 0; - } - - @Override - public String modeName() { - return NAME; - } - - public List getSeedNodes() { - return seedNodes; - } - - public int getMaxConnectionsPerCluster() { - return maxConnectionsPerCluster; - } - - public int getNumNodesConnected() { - return numNodesConnected; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - SniffModeInfo sniff = (SniffModeInfo) o; - return maxConnectionsPerCluster == sniff.maxConnectionsPerCluster - && numNodesConnected == sniff.numNodesConnected - && Objects.equals(seedNodes, sniff.seedNodes); - } - - @Override - public int hashCode() { - return Objects.hash(seedNodes, maxConnectionsPerCluster, numNodesConnected); - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/BroadcastResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/BroadcastResponse.java deleted file mode 100644 index ef3b35a0896d..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/BroadcastResponse.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.client.core; - -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.Collection; -import java.util.Collections; -import java.util.Objects; - -/** - * Represents a response to a request that is broadcast to a collection of shards. - */ -public class BroadcastResponse { - - private final Shards shards; - - /** - * Represents the shard-level summary of the response execution. - * - * @return the shard-level response summary - */ - public Shards shards() { - return shards; - } - - protected BroadcastResponse(final Shards shards) { - this.shards = Objects.requireNonNull(shards); - } - - private static final ParseField SHARDS_FIELD = new ParseField("_shards"); - - static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "broadcast_response", - a -> new BroadcastResponse((Shards) a[0]) - ); - - static { - declareShardsField(PARSER); - } - - /** - * Parses a broadcast response. 
- * - * @param parser the parser - * @return a broadcast response parsed from the specified parser - * @throws IOException if an I/O exception occurs parsing the response - */ - public static BroadcastResponse fromXContent(final XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - - protected static void declareShardsField(ConstructingObjectParser parser) { - parser.declareObject(ConstructingObjectParser.constructorArg(), Shards.SHARDS_PARSER, SHARDS_FIELD); - } - - /** - * Represents the results of a collection of shards on which a request was executed against. - */ - public static class Shards { - - private final int total; - - /** - * The total number of shards on which a request was executed against. - * - * @return the total number of shards - */ - public int total() { - return total; - } - - private final int successful; - - /** - * The number of successful shards on which a request was executed against. - * - * @return the number of successful shards - */ - public int successful() { - return successful; - } - - private final int skipped; - - /** - * The number of shards skipped by the request. - * - * @return the number of skipped shards - */ - public int skipped() { - return skipped; - } - - private final int failed; - - /** - * The number of shards on which a request failed to be executed against. - * - * @return the number of failed shards - */ - public int failed() { - return failed; - } - - private final Collection failures; - - /** - * The failures corresponding to the shards on which a request failed to be executed against. Note that the number of failures might - * not match {@link #failed()} as some responses group together shard failures. - * - * @return the failures - */ - public Collection failures() { - return failures; - } - - Shards( - final int total, - final int successful, - final int skipped, - final int failed, - final Collection failures - ) { - this.total = total; - this.successful = successful; - this.skipped = skipped; - this.failed = failed; - this.failures = Collections.unmodifiableCollection(Objects.requireNonNull(failures)); - } - - private static final ParseField TOTAL_FIELD = new ParseField("total"); - private static final ParseField SUCCESSFUL_FIELD = new ParseField("successful"); - private static final ParseField SKIPPED_FIELD = new ParseField("skipped"); - private static final ParseField FAILED_FIELD = new ParseField("failed"); - private static final ParseField FAILURES_FIELD = new ParseField("failures"); - - @SuppressWarnings("unchecked") - static final ConstructingObjectParser SHARDS_PARSER = new ConstructingObjectParser<>( - "shards", - a -> new Shards( - (int) a[0], // total - (int) a[1], // successful - a[2] == null ? 0 : (int) a[2], // skipped - (int) a[3], // failed - a[4] == null ? 
Collections.emptyList() : (Collection) a[4] - ) - ); // failures - - static { - SHARDS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), TOTAL_FIELD); - SHARDS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), SUCCESSFUL_FIELD); - SHARDS_PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), SKIPPED_FIELD); - SHARDS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), FAILED_FIELD); - SHARDS_PARSER.declareObjectArray( - ConstructingObjectParser.optionalConstructorArg(), - DefaultShardOperationFailedException.PARSER, - FAILURES_FIELD - ); - } - - } - -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountRequest.java deleted file mode 100644 index 0899eb03311d..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountRequest.java +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.client.core; - -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Validatable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Objects; - -/** - * Encapsulates a request to _count API against one, several or all indices. - */ -public final class CountRequest implements Validatable, ToXContentObject { - - private String[] indices = Strings.EMPTY_ARRAY; - private String[] types = Strings.EMPTY_ARRAY; - private String routing; - private String preference; - private QueryBuilder query; - private IndicesOptions indicesOptions; - private int terminateAfter = SearchContext.DEFAULT_TERMINATE_AFTER; - private Float minScore; - - public CountRequest() {} - - /** - * Constructs a new count request against the indices. No indices provided here means that count will execute on all indices. - */ - public CountRequest(String... indices) { - indices(indices); - } - - /** - * Constructs a new search request against the provided indices with the given search source. - * - * @deprecated The count api only supports a query. Use {@link #CountRequest(String[], QueryBuilder)} instead. - */ - @Deprecated - public CountRequest(String[] indices, SearchSourceBuilder searchSourceBuilder) { - indices(indices); - this.query = Objects.requireNonNull(searchSourceBuilder, "source must not be null").query(); - } - - /** - * Constructs a new search request against the provided indices with the given query. - */ - public CountRequest(String[] indices, QueryBuilder query) { - indices(indices); - this.query = Objects.requireNonNull(query, "query must not be null"); - ; - } - - /** - * Sets the indices the count will be executed on. - */ - public CountRequest indices(String... 
indices) { - Objects.requireNonNull(indices, "indices must not be null"); - for (String index : indices) { - Objects.requireNonNull(index, "index must not be null"); - } - this.indices = indices; - return this; - } - - /** - * The source of the count request. - * - * @deprecated The count api only supports a query. Use {@link #query(QueryBuilder)} instead. - */ - @Deprecated - public CountRequest source(SearchSourceBuilder searchSourceBuilder) { - this.query = Objects.requireNonNull(searchSourceBuilder, "source must not be null").query(); - return this; - } - - /** - * Sets the query to execute for this count request. - */ - public CountRequest query(QueryBuilder query) { - this.query = Objects.requireNonNull(query, "query must not be null"); - return this; - } - - /** - * The document types to execute the count against. Defaults to be executed against all types. - * - * @deprecated Types are in the process of being removed. Instead of using a type, prefer to - * filter on a field on the document. - */ - @Deprecated - public CountRequest types(String... types) { - Objects.requireNonNull(types, "types must not be null"); - for (String type : types) { - Objects.requireNonNull(type, "type must not be null"); - } - this.types = types; - return this; - } - - /** - * The routing values to control the shards that the search will be executed on. - */ - public CountRequest routing(String routing) { - this.routing = routing; - return this; - } - - /** - * A comma separated list of routing values to control the shards the count will be executed on. - */ - public CountRequest routing(String... routings) { - this.routing = Strings.arrayToCommaDelimitedString(routings); - return this; - } - - /** - * Returns the indices options used to resolve indices. They tell for instance whether a single index is accepted, whether an empty - * array will be converted to _all, and how wildcards will be expanded if needed. - * - * @see org.elasticsearch.action.support.IndicesOptions - */ - public CountRequest indicesOptions(IndicesOptions indicesOptions) { - this.indicesOptions = Objects.requireNonNull(indicesOptions, "indicesOptions must not be null"); - return this; - } - - /** - * Sets the preference to execute the count. Defaults to randomize across shards. Can be set to {@code _local} to prefer local shards - * or a custom value, which guarantees that the same order will be used across different requests. - */ - public CountRequest preference(String preference) { - this.preference = preference; - return this; - } - - public IndicesOptions indicesOptions() { - return this.indicesOptions; - } - - public String routing() { - return this.routing; - } - - public String preference() { - return this.preference; - } - - public String[] indices() { - return Arrays.copyOf(this.indices, this.indices.length); - } - - public Float minScore() { - return minScore; - } - - public CountRequest minScore(Float minScore) { - this.minScore = minScore; - return this; - } - - public int terminateAfter() { - return this.terminateAfter; - } - - public CountRequest terminateAfter(int terminateAfter) { - if (terminateAfter < 0) { - throw new IllegalArgumentException("terminateAfter must be > 0"); - } - this.terminateAfter = terminateAfter; - return this; - } - - /** - * @deprecated Types are in the process of being removed. Instead of using a type, prefer to - * filter on a field on the document. 
- */ - @Deprecated - public String[] types() { - return Arrays.copyOf(this.types, this.types.length); - } - - /** - * @return the source builder - * @deprecated The count api only supports a query. Use {@link #query()} instead. - */ - @Deprecated - public SearchSourceBuilder source() { - return new SearchSourceBuilder().query(query); - } - - /** - * @return The provided query to execute with the count request or - * null if no query was provided. - */ - public QueryBuilder query() { - return query; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - if (query != null) { - builder.field("query", query); - } - builder.endObject(); - return builder; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - CountRequest that = (CountRequest) o; - return Objects.equals(indicesOptions, that.indicesOptions) - && Arrays.equals(indices, that.indices) - && Arrays.equals(types, that.types) - && Objects.equals(routing, that.routing) - && Objects.equals(preference, that.preference) - && Objects.equals(terminateAfter, that.terminateAfter) - && Objects.equals(minScore, that.minScore) - && Objects.equals(query, that.query); - } - - @Override - public int hashCode() { - int result = Objects.hash(indicesOptions, routing, preference, terminateAfter, minScore, query); - result = 31 * result + Arrays.hashCode(indices); - result = 31 * result + Arrays.hashCode(types); - return result; - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountResponse.java deleted file mode 100644 index c19245c4d09f..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountResponse.java +++ /dev/null @@ -1,230 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.client.core; - -import org.elasticsearch.action.search.ShardSearchFailure; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; - -/** - * A response to _count API request. - */ -public final class CountResponse { - - static final ParseField COUNT = new ParseField("count"); - static final ParseField TERMINATED_EARLY = new ParseField("terminated_early"); - static final ParseField SHARDS = new ParseField("_shards"); - - private final long count; - private final Boolean terminatedEarly; - private final ShardStats shardStats; - - public CountResponse(long count, Boolean terminatedEarly, ShardStats shardStats) { - this.count = count; - this.terminatedEarly = terminatedEarly; - this.shardStats = shardStats; - } - - public ShardStats getShardStats() { - return shardStats; - } - - /** - * Number of documents matching request. 
- */ - public long getCount() { - return count; - } - - /** - * The total number of shards the search was executed on. - */ - public int getTotalShards() { - return shardStats.totalShards; - } - - /** - * The successful number of shards the search was executed on. - */ - public int getSuccessfulShards() { - return shardStats.successfulShards; - } - - /** - * The number of shards skipped due to pre-filtering - */ - public int getSkippedShards() { - return shardStats.skippedShards; - } - - /** - * The failed number of shards the search was executed on. - */ - public int getFailedShards() { - return shardStats.shardFailures.length; - } - - /** - * The failures that occurred during the search. - */ - public ShardSearchFailure[] getShardFailures() { - return shardStats.shardFailures; - } - - public RestStatus status() { - return RestStatus.status(shardStats.successfulShards, shardStats.totalShards, shardStats.shardFailures); - } - - public static CountResponse fromXContent(XContentParser parser) throws IOException { - ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); - parser.nextToken(); - ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser); - String currentName = parser.currentName(); - Boolean terminatedEarly = null; - long count = 0; - ShardStats shardStats = new ShardStats(-1, -1, 0, ShardSearchFailure.EMPTY_ARRAY); - - for (XContentParser.Token token = parser.nextToken(); token != XContentParser.Token.END_OBJECT; token = parser.nextToken()) { - if (token == XContentParser.Token.FIELD_NAME) { - currentName = parser.currentName(); - } else if (token.isValue()) { - if (COUNT.match(currentName, parser.getDeprecationHandler())) { - count = parser.longValue(); - } else if (TERMINATED_EARLY.match(currentName, parser.getDeprecationHandler())) { - terminatedEarly = parser.booleanValue(); - } else { - parser.skipChildren(); - } - } else if (token == XContentParser.Token.START_OBJECT) { - if (SHARDS.match(currentName, parser.getDeprecationHandler())) { - shardStats = ShardStats.fromXContent(parser); - } else { - parser.skipChildren(); - } - } - } - return new CountResponse(count, terminatedEarly, shardStats); - } - - @Override - public String toString() { - String s = "{" - + "count=" - + count - + (isTerminatedEarly() != null ? ", terminatedEarly=" + terminatedEarly : "") - + ", " - + shardStats - + '}'; - return s; - } - - public Boolean isTerminatedEarly() { - return terminatedEarly; - } - - /** - * Encapsulates _shards section of count api response. 
- */ - public static final class ShardStats { - - static final ParseField FAILED = new ParseField("failed"); - static final ParseField SKIPPED = new ParseField("skipped"); - static final ParseField TOTAL = new ParseField("total"); - static final ParseField SUCCESSFUL = new ParseField("successful"); - static final ParseField FAILURES = new ParseField("failures"); - - private final int successfulShards; - private final int totalShards; - private final int skippedShards; - private final ShardSearchFailure[] shardFailures; - - public ShardStats(int successfulShards, int totalShards, int skippedShards, ShardSearchFailure[] shardFailures) { - this.successfulShards = successfulShards; - this.totalShards = totalShards; - this.skippedShards = skippedShards; - this.shardFailures = Arrays.copyOf(shardFailures, shardFailures.length); - } - - public int getSuccessfulShards() { - return successfulShards; - } - - public int getTotalShards() { - return totalShards; - } - - public int getSkippedShards() { - return skippedShards; - } - - public ShardSearchFailure[] getShardFailures() { - return Arrays.copyOf(shardFailures, shardFailures.length, ShardSearchFailure[].class); - } - - static ShardStats fromXContent(XContentParser parser) throws IOException { - int successfulShards = -1; - int totalShards = -1; - int skippedShards = 0; // BWC @see org.elasticsearch.action.search.SearchResponse - List failures = new ArrayList<>(); - XContentParser.Token token; - String currentName = parser.currentName(); - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentName = parser.currentName(); - } else if (token.isValue()) { - if (FAILED.match(currentName, parser.getDeprecationHandler())) { - parser.intValue(); - } else if (SKIPPED.match(currentName, parser.getDeprecationHandler())) { - skippedShards = parser.intValue(); - } else if (TOTAL.match(currentName, parser.getDeprecationHandler())) { - totalShards = parser.intValue(); - } else if (SUCCESSFUL.match(currentName, parser.getDeprecationHandler())) { - successfulShards = parser.intValue(); - } else { - parser.skipChildren(); - } - } else if (token == XContentParser.Token.START_ARRAY) { - if (FAILURES.match(currentName, parser.getDeprecationHandler())) { - while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) { - failures.add(ShardSearchFailure.fromXContent(parser)); - } - } else { - parser.skipChildren(); - } - } else { - parser.skipChildren(); - } - } - return new ShardStats(successfulShards, totalShards, skippedShards, failures.toArray(new ShardSearchFailure[failures.size()])); - } - - @Override - public String toString() { - return "_shards : {" - + "total=" - + totalShards - + ", successful=" - + successfulShards - + ", skipped=" - + skippedShards - + ", failed=" - + (shardFailures != null && shardFailures.length > 0 ? shardFailures.length : 0) - + (shardFailures != null && shardFailures.length > 0 ? ", failures: " + Arrays.asList(shardFailures) : "") - + '}'; - } - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/GetSourceRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/GetSourceRequest.java deleted file mode 100644 index 6e26457a27a5..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/GetSourceRequest.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.client.core; - -import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.client.Validatable; -import org.elasticsearch.search.fetch.subphase.FetchSourceContext; - -public final class GetSourceRequest implements Validatable { - private String routing; - private String preference; - - private boolean refresh = false; - private boolean realtime = true; - - private FetchSourceContext fetchSourceContext; - - private final String index; - private final String id; - - public GetSourceRequest(String index, String id) { - this.index = index; - this.id = id; - } - - public static GetSourceRequest from(GetRequest getRequest) { - return new GetSourceRequest(getRequest.index(), getRequest.id()).routing(getRequest.routing()) - .preference(getRequest.preference()) - .refresh(getRequest.refresh()) - .realtime(getRequest.realtime()) - .fetchSourceContext(getRequest.fetchSourceContext()); - } - - /** - * Controls the shard routing of the request. Using this value to hash the shard - * and not the id. - */ - public GetSourceRequest routing(String routing) { - if (routing != null && routing.length() == 0) { - this.routing = null; - } else { - this.routing = routing; - } - return this; - } - - /** - * Sets the preference to execute the search. Defaults to randomize across shards. Can be set to - * {@code _local} to prefer local shards or a custom value, which guarantees that the same order - * will be used across different requests. - */ - public GetSourceRequest preference(String preference) { - this.preference = preference; - return this; - } - - /** - * Should a refresh be executed before this get operation causing the operation to - * return the latest value. Note, heavy get should not set this to {@code true}. Defaults - * to {@code false}. - */ - public GetSourceRequest refresh(boolean refresh) { - this.refresh = refresh; - return this; - } - - public GetSourceRequest realtime(boolean realtime) { - this.realtime = realtime; - return this; - } - - /** - * Allows setting the {@link FetchSourceContext} for this request, controlling if and how _source should be returned. - * Note, the {@code fetchSource} field of the context must be set to {@code true}. - */ - - public GetSourceRequest fetchSourceContext(FetchSourceContext context) { - this.fetchSourceContext = context; - return this; - } - - public String index() { - return index; - } - - public String id() { - return id; - } - - public String routing() { - return routing; - } - - public String preference() { - return preference; - } - - public boolean refresh() { - return refresh; - } - - public boolean realtime() { - return realtime; - } - - public FetchSourceContext fetchSourceContext() { - return fetchSourceContext; - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/GetSourceResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/GetSourceResponse.java deleted file mode 100644 index 45469cf1d1fb..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/GetSourceResponse.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.client.core; - -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.Map; - -public final class GetSourceResponse { - - private final Map source; - - public GetSourceResponse(Map source) { - this.source = source; - } - - public static GetSourceResponse fromXContent(XContentParser parser) throws IOException { - return new GetSourceResponse(parser.map()); - } - - public Map getSource() { - return this.source; - } - - @Override - public String toString() { - return source.toString(); - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/IndexerJobStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/IndexerJobStats.java deleted file mode 100644 index e404f254e17a..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/IndexerJobStats.java +++ /dev/null @@ -1,226 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.client.core; - -import org.elasticsearch.xcontent.ParseField; - -import java.util.Objects; - -public abstract class IndexerJobStats { - public static ParseField NUM_PAGES = new ParseField("pages_processed"); - public static ParseField NUM_INPUT_DOCUMENTS = new ParseField("documents_processed"); - public static ParseField NUM_OUTPUT_DOCUMENTS = new ParseField("documents_indexed"); - public static ParseField NUM_INVOCATIONS = new ParseField("trigger_count"); - public static ParseField INDEX_TIME_IN_MS = new ParseField("index_time_in_ms"); - public static ParseField SEARCH_TIME_IN_MS = new ParseField("search_time_in_ms"); - public static ParseField PROCESSING_TIME_IN_MS = new ParseField("processing_time_in_ms"); - public static ParseField INDEX_TOTAL = new ParseField("index_total"); - public static ParseField SEARCH_TOTAL = new ParseField("search_total"); - public static ParseField PROCESSING_TOTAL = new ParseField("processing_total"); - public static ParseField SEARCH_FAILURES = new ParseField("search_failures"); - public static ParseField INDEX_FAILURES = new ParseField("index_failures"); - - protected final long numPages; - protected final long numInputDocuments; - protected final long numOuputDocuments; - protected final long numInvocations; - protected final long indexTime; - protected final long indexTotal; - protected final long searchTime; - protected final long searchTotal; - protected final long processingTime; - protected final long processingTotal; - protected final long indexFailures; - protected final long searchFailures; - - public IndexerJobStats( - long numPages, - long numInputDocuments, - long numOutputDocuments, - long numInvocations, - long indexTime, - long searchTime, - long processingTime, - long indexTotal, - long searchTotal, - long processingTotal, - long indexFailures, - long searchFailures - ) { - this.numPages = numPages; - this.numInputDocuments = numInputDocuments; - this.numOuputDocuments = numOutputDocuments; - 
this.numInvocations = numInvocations; - this.indexTime = indexTime; - this.indexTotal = indexTotal; - this.searchTime = searchTime; - this.searchTotal = searchTotal; - this.processingTime = processingTime; - this.processingTotal = processingTotal; - this.indexFailures = indexFailures; - this.searchFailures = searchFailures; - } - - /** - * The number of pages read from the input indices - */ - public long getNumPages() { - return numPages; - } - - /** - * The number of documents read from the input indices - */ - public long getNumDocuments() { - return numInputDocuments; - } - - /** - * Number of times that the job woke up to write documents - */ - public long getNumInvocations() { - return numInvocations; - } - - /** - * Number of documents written - */ - public long getOutputDocuments() { - return numOuputDocuments; - } - - /** - * Number of index failures that have occurred - */ - public long getIndexFailures() { - return indexFailures; - } - - /** - * Number of failures that have occurred - */ - public long getSearchFailures() { - return searchFailures; - } - - /** - * Returns the time spent indexing (cumulative) in milliseconds - */ - public long getIndexTime() { - return indexTime; - } - - /** - * Returns the time spent searching (cumulative) in milliseconds - */ - public long getSearchTime() { - return searchTime; - } - - /** - * Returns the time spent processing (cumulative) in milliseconds - */ - public long getProcessingTime() { - return processingTime; - } - - /** - * Returns the total number of indexing requests that have been processed - * (Note: this is not the number of _documents_ that have been indexed) - */ - public long getIndexTotal() { - return indexTotal; - } - - /** - * Returns the total number of search requests that have been made - */ - public long getSearchTotal() { - return searchTotal; - } - - /** - * Returns the total number of processing runs that have been made - */ - public long getProcessingTotal() { - return processingTotal; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } - - if (other instanceof IndexerJobStats == false) { - return false; - } - - IndexerJobStats that = (IndexerJobStats) other; - return Objects.equals(this.numPages, that.numPages) - && Objects.equals(this.numInputDocuments, that.numInputDocuments) - && Objects.equals(this.numOuputDocuments, that.numOuputDocuments) - && Objects.equals(this.numInvocations, that.numInvocations) - && Objects.equals(this.indexTime, that.indexTime) - && Objects.equals(this.searchTime, that.searchTime) - && Objects.equals(this.processingTime, that.processingTime) - && Objects.equals(this.indexFailures, that.indexFailures) - && Objects.equals(this.searchFailures, that.searchFailures) - && Objects.equals(this.searchTotal, that.searchTotal) - && Objects.equals(this.processingTotal, that.processingTotal) - && Objects.equals(this.indexTotal, that.indexTotal); - } - - @Override - public int hashCode() { - return Objects.hash( - numPages, - numInputDocuments, - numOuputDocuments, - numInvocations, - indexTime, - searchTime, - processingTime, - indexFailures, - searchFailures, - searchTotal, - indexTotal, - processingTotal - ); - } - - @Override - public final String toString() { - return "{pages=" - + numPages - + ", input_docs=" - + numInputDocuments - + ", output_docs=" - + numOuputDocuments - + ", invocations=" - + numInvocations - + ", index_failures=" - + indexFailures - + ", search_failures=" - + searchFailures - + ", index_time_in_ms=" - + indexTime - + ", 
index_total=" - + indexTotal - + ", search_time_in_ms=" - + searchTime - + ", search_total=" - + searchTotal - + ", processing_time_in_ms=" - + processingTime - + ", processing_total=" - + processingTotal - + "}"; - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/IndexerState.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/IndexerState.java deleted file mode 100644 index 95ca55505261..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/IndexerState.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.client.core; - -import java.util.Locale; - -/** - * IndexerState represents the internal state of the indexer. It - * is also persistent when changing from started/stopped in case the allocated - * task is restarted elsewhere. - */ -public enum IndexerState { - /** Indexer is running, but not actively indexing data (e.g. it's idle). */ - STARTED, - - /** Indexer is actively indexing data. */ - INDEXING, - - /** - * Transition state to where an indexer has acknowledged the stop - * but is still in process of halting. - */ - STOPPING, - - /** Indexer is "paused" and ignoring scheduled triggers. */ - STOPPED, - - /** - * Something (internal or external) has requested the indexer abort - * and shutdown. - */ - ABORTING; - - public static IndexerState fromString(String name) { - return valueOf(name.trim().toUpperCase(Locale.ROOT)); - } - - public String value() { - return name().toLowerCase(Locale.ROOT); - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MainRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MainRequest.java deleted file mode 100644 index 592d98674372..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MainRequest.java +++ /dev/null @@ -1,13 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.client.core; - -import org.elasticsearch.client.Validatable; - -public class MainRequest implements Validatable {} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MultiTermVectorsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MultiTermVectorsRequest.java deleted file mode 100644 index cdd98bb12a56..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MultiTermVectorsRequest.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.client.core; - -import org.elasticsearch.client.Validatable; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import static org.elasticsearch.client.core.TermVectorsRequest.createFromTemplate; - -public class MultiTermVectorsRequest implements ToXContentObject, Validatable { - - private List requests = new ArrayList<>(); - - /** - * Constructs an empty MultiTermVectorsRequest - * After that use {@code add} method to add individual {@code TermVectorsRequest} to it. - */ - public MultiTermVectorsRequest() {} - - /** - * Constructs a MultiTermVectorsRequest from the given document ids - * and a template {@code TermVectorsRequest}. - * Used when individual requests share the same index, type and other settings. - * @param ids - ids of documents for which term vectors are requested - * @param template - a template {@code TermVectorsRequest} that allows to set all - * settings only once for all requests. - */ - public MultiTermVectorsRequest(String[] ids, TermVectorsRequest template) { - for (String id : ids) { - TermVectorsRequest request = createFromTemplate(template, id); - requests.add(request); - } - } - - /** - * Adds another {@code TermVectorsRequest} to this {@code MultiTermVectorsRequest} - * @param request - {@code TermVectorsRequest} to add - */ - public void add(TermVectorsRequest request) { - requests.add(request); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.startArray("docs"); - for (TermVectorsRequest request : requests) { - request.toXContent(builder, params); - } - builder.endArray(); - builder.endObject(); - return builder; - } - -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MultiTermVectorsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MultiTermVectorsResponse.java deleted file mode 100644 index 3f836d714433..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MultiTermVectorsResponse.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.client.core; - -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentParser; - -import java.util.List; -import java.util.Objects; - -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; - -public class MultiTermVectorsResponse { - private final List responses; - - public MultiTermVectorsResponse(List responses) { - this.responses = responses; - } - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "multi_term_vectors", - true, - args -> { - // as the response comes from server, we are sure that args[0] will be a list of TermVectorsResponse - @SuppressWarnings("unchecked") - List termVectorsResponsesList = (List) args[0]; - return new MultiTermVectorsResponse(termVectorsResponsesList); - } - ); - - static { - PARSER.declareObjectArray(constructorArg(), (p, c) -> TermVectorsResponse.fromXContent(p), new ParseField("docs")); - } - - public static MultiTermVectorsResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - - /** - * Returns the list of {@code TermVectorsResponse} for this {@code MultiTermVectorsResponse} - */ - public List getTermVectorsResponses() { - return responses; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if ((obj instanceof MultiTermVectorsResponse) == false) return false; - MultiTermVectorsResponse other = (MultiTermVectorsResponse) obj; - return Objects.equals(responses, other.responses); - } - - @Override - public int hashCode() { - return Objects.hash(responses); - } - -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/PageParams.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/PageParams.java deleted file mode 100644 index c41e17e5d1ed..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/PageParams.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.client.core; - -import org.elasticsearch.core.Nullable; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; - -import java.io.IOException; -import java.util.Objects; - -/** - * Paging parameters for GET requests - */ -public class PageParams implements ToXContentObject { - - public static final ParseField PAGE = new ParseField("page"); - public static final ParseField FROM = new ParseField("from"); - public static final ParseField SIZE = new ParseField("size"); - - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - PAGE.getPreferredName(), - a -> new PageParams((Integer) a[0], (Integer) a[1]) - ); - - static { - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), FROM); - PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), SIZE); - } - - private final Integer from; - private final Integer size; - - /** - * Constructs paging parameters - * @param from skips the specified number of items. When {@code null} the default value will be used. - * @param size specifies the maximum number of items to obtain. When {@code null} the default value will be used. - */ - public PageParams(@Nullable Integer from, @Nullable Integer size) { - this.from = from; - this.size = size; - } - - public Integer getFrom() { - return from; - } - - public Integer getSize() { - return size; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - if (from != null) { - builder.field(FROM.getPreferredName(), from); - } - if (size != null) { - builder.field(SIZE.getPreferredName(), size); - } - builder.endObject(); - return builder; - } - - @Override - public int hashCode() { - return Objects.hash(from, size); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - PageParams other = (PageParams) obj; - return Objects.equals(from, other.from) && Objects.equals(size, other.size); - } - -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsRequest.java deleted file mode 100644 index af27dae1c04c..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsRequest.java +++ /dev/null @@ -1,284 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.client.core; - -import org.elasticsearch.client.Validatable; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; - -import java.io.IOException; -import java.io.InputStream; -import java.util.Map; - -public class TermVectorsRequest implements ToXContentObject, Validatable { - - private final String index; - @Nullable - private final String type; - private String id = null; - private XContentBuilder docBuilder = null; - - private String routing = null; - private String preference = null; - private boolean realtime = true; - private String[] fields = null; - private boolean requestPositions = true; - private boolean requestPayloads = true; - private boolean requestOffsets = true; - private boolean requestFieldStatistics = true; - private boolean requestTermStatistics = false; - private Map perFieldAnalyzer = null; - private Map filterSettings = null; - - /** - * Constructs TermVectorRequest for the given document - * - * @param index - index of the document - * @param docId - id of the document - */ - public TermVectorsRequest(String index, String docId) { - this.index = index; - this.type = null; - this.id = docId; - } - - /** - * Constructs TermVectorRequest for the given document - * - * @param index - index of the document - * @param type - type of the document - * @param docId - id of the document - * - * @deprecated Types are in the process of being removed, use - * {@link #TermVectorsRequest(String, String)} instead. - */ - @Deprecated - public TermVectorsRequest(String index, String type, String docId) { - this.index = index; - this.type = type; - this.id = docId; - } - - /** - * Constructs TermVectorRequest for an artificial document - * - * @param index - index of the document - * @param docBuilder - an artificial document - */ - public TermVectorsRequest(String index, XContentBuilder docBuilder) { - this.index = index; - this.type = null; - this.docBuilder = docBuilder; - } - - /** - * Constructs TermVectorRequest for an artificial document - * @param index - index of the document - * @param type - type of the document - * @param docBuilder - an artificial document - * - * @deprecated Types are in the process of being removed, use - * {@link TermVectorsRequest(String, XContentBuilder)} instead. 
- */ - @Deprecated - public TermVectorsRequest(String index, String type, XContentBuilder docBuilder) { - this.index = index; - this.type = type; - this.docBuilder = docBuilder; - } - - /** - * Constructs a new TermVectorRequest from a template - * using the provided document id - * @param template - a term vector request served as a template - * @param id - id of the requested document - */ - static TermVectorsRequest createFromTemplate(TermVectorsRequest template, String id) { - TermVectorsRequest request = new TermVectorsRequest(template.getIndex(), template.getType(), id); - request.realtime = template.getRealtime(); - request.requestPositions = template.requestPositions; - request.requestPayloads = template.requestPayloads; - request.requestOffsets = template.requestOffsets; - request.requestFieldStatistics = template.requestFieldStatistics; - request.requestTermStatistics = template.requestTermStatistics; - if (template.routing != null) request.setRouting(template.getRouting()); - if (template.preference != null) request.setPreference(template.getPreference()); - if (template.fields != null) request.setFields(template.getFields()); - if (template.perFieldAnalyzer != null) request.setPerFieldAnalyzer(template.perFieldAnalyzer); - if (template.filterSettings != null) request.setFilterSettings(template.filterSettings); - return request; - } - - /** - * Returns the index of the request - */ - public String getIndex() { - return index; - } - - /** - * Returns the type of the request - * - * @deprecated Types are in the process of being removed. - */ - @Deprecated - public String getType() { - return type; - } - - /** - * Returns the id of the request - * can be NULL if there is no document ID - */ - public String getId() { - return id; - } - - /** - * Sets the fields for which term vectors information should be retrieved - */ - public void setFields(String... 
fields) { - this.fields = fields; - } - - public String[] getFields() { - return fields; - } - - /** - * Sets whether to request term positions - */ - public void setPositions(boolean positions) { - this.requestPositions = positions; - } - - /** - * Sets whether to request term payloads - */ - public void setPayloads(boolean payloads) { - this.requestPayloads = payloads; - } - - /** - * Sets whether to request term offsets - */ - public void setOffsets(boolean offsets) { - this.requestOffsets = offsets; - } - - /** - * Sets whether to request field statistics - */ - public void setFieldStatistics(boolean fieldStatistics) { - this.requestFieldStatistics = fieldStatistics; - } - - /** - * Sets whether to request term statistics - */ - public void setTermStatistics(boolean termStatistics) { - this.requestTermStatistics = termStatistics; - } - - /** - * Sets different analyzers than the one at the fields - */ - public void setPerFieldAnalyzer(Map perFieldAnalyzer) { - this.perFieldAnalyzer = perFieldAnalyzer; - } - - /** - * Sets conditions for terms filtering - */ - public void setFilterSettings(Map filterSettings) { - this.filterSettings = filterSettings; - } - - /** - * Sets a routing to route a request to a particular shard - */ - public void setRouting(String routing) { - this.routing = routing; - } - - public String getRouting() { - return routing; - } - - /** - * Set a preference of which shard copies to execute the request - */ - public void setPreference(String preference) { - this.preference = preference; - } - - public String getPreference() { - return preference; - } - - /** - * Sets if the request should be realtime or near-realtime - */ - public void setRealtime(boolean realtime) { - this.realtime = realtime; - } - - /** - * Returns if the request is realtime(true) or near-realtime(false) - */ - public boolean getRealtime() { - return realtime; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field("_index", index); - if (type != null) { - builder.field("_type", type); - } - if (id != null) builder.field("_id", id); - if (fields != null) builder.field("fields", fields); - // set values only when different from defaults - if (requestPositions == false) builder.field("positions", false); - if (requestPayloads == false) builder.field("payloads", false); - if (requestOffsets == false) builder.field("offsets", false); - if (requestFieldStatistics == false) builder.field("field_statistics", false); - if (requestTermStatistics) builder.field("term_statistics", true); - if (perFieldAnalyzer != null) builder.field("per_field_analyzer", perFieldAnalyzer); - - if (docBuilder != null) { - BytesReference doc = BytesReference.bytes(docBuilder); - try (InputStream stream = doc.streamInput()) { - builder.rawField("doc", stream, docBuilder.contentType()); - } - } - - if (filterSettings != null) { - builder.startObject("filter"); - String[] filterSettingNames = { - "max_num_terms", - "min_term_freq", - "max_term_freq", - "min_doc_freq", - "max_doc_freq", - "min_word_length", - "max_word_length" }; - for (String settingName : filterSettingNames) { - if (filterSettings.containsKey(settingName)) builder.field(settingName, filterSettings.get(settingName)); - } - builder.endObject(); - } - builder.endObject(); - return builder; - } - -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsResponse.java 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsResponse.java deleted file mode 100644 index 89764f639e1a..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsResponse.java +++ /dev/null @@ -1,468 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.client.core; - -import org.elasticsearch.core.Nullable; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentParser; - -import java.util.Collections; -import java.util.Comparator; -import java.util.List; -import java.util.Objects; - -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; - -public class TermVectorsResponse { - private final String index; - private final String id; - private final long docVersion; - private final boolean found; - private final long tookInMillis; - private final List termVectorList; - - public TermVectorsResponse(String index, String id, long version, boolean found, long tookInMillis, List termVectorList) { - this.index = index; - this.id = id; - this.docVersion = version; - this.found = found; - this.tookInMillis = tookInMillis; - this.termVectorList = termVectorList; - } - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "term_vectors", - true, - args -> { - // as the response comes from server, we are sure that args[5] will be a list of TermVector - @SuppressWarnings("unchecked") - List termVectorList = (List) args[5]; - if (termVectorList != null) { - Collections.sort(termVectorList, Comparator.comparing(TermVector::getFieldName)); - } - return new TermVectorsResponse( - (String) args[0], - (String) args[1], - (long) args[2], - (boolean) args[3], - (long) args[4], - termVectorList - ); - } - ); - - static { - PARSER.declareString(constructorArg(), new ParseField("_index")); - PARSER.declareString(optionalConstructorArg(), new ParseField("_id")); - PARSER.declareLong(constructorArg(), new ParseField("_version")); - PARSER.declareBoolean(constructorArg(), new ParseField("found")); - PARSER.declareLong(constructorArg(), new ParseField("took")); - PARSER.declareNamedObjects( - optionalConstructorArg(), - (p, c, fieldName) -> TermVector.fromXContent(p, fieldName), - new ParseField("term_vectors") - ); - } - - public static TermVectorsResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - - /** - * Returns the index for the response - */ - public String getIndex() { - return index; - } - - /** - * Returns the id of the request - * can be NULL if there is no document ID - */ - public String getId() { - return id; - } - - /** - * Returns if the document is found - * always true for artificial documents - */ - public boolean getFound() { - return found; - } - - /** - * Returns the document version - */ - public long getDocVersion() { - return docVersion; - } - - /** - * Returns the time that a request took in milliseconds - */ - public long getTookInMillis() { - return tookInMillis; - } - - /** - * Returns the list 
of term vectors - */ - public List getTermVectorsList() { - return termVectorList; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if ((obj instanceof TermVectorsResponse) == false) return false; - TermVectorsResponse other = (TermVectorsResponse) obj; - return index.equals(other.index) - && Objects.equals(id, other.id) - && docVersion == other.docVersion - && found == other.found - && tookInMillis == other.tookInMillis - && Objects.equals(termVectorList, other.termVectorList); - } - - @Override - public int hashCode() { - return Objects.hash(index, id, docVersion, found, tookInMillis, termVectorList); - } - - public static final class TermVector { - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "term_vector", - true, - (args, ctxFieldName) -> { - // as the response comes from server, we are sure that args[1] will be a list of Term - @SuppressWarnings("unchecked") - List terms = (List) args[1]; - if (terms != null) { - Collections.sort(terms, Comparator.comparing(Term::getTerm)); - } - return new TermVector(ctxFieldName, (FieldStatistics) args[0], terms); - } - ); - - static { - PARSER.declareObject(optionalConstructorArg(), (p, c) -> FieldStatistics.fromXContent(p), new ParseField("field_statistics")); - PARSER.declareNamedObjects(optionalConstructorArg(), (p, c, term) -> Term.fromXContent(p, term), new ParseField("terms")); - } - - private final String fieldName; - @Nullable - private final FieldStatistics fieldStatistics; - @Nullable - private final List terms; - - public TermVector(String fieldName, FieldStatistics fieldStatistics, List terms) { - this.fieldName = fieldName; - this.fieldStatistics = fieldStatistics; - this.terms = terms; - } - - public static TermVector fromXContent(XContentParser parser, String fieldName) { - return PARSER.apply(parser, fieldName); - } - - /** - * Returns the field name of the current term vector - */ - public String getFieldName() { - return fieldName; - } - - /** - * Returns the list of terms for the current term vector - */ - public List getTerms() { - return terms; - } - - /** - * Returns the field statistics for the current field - */ - public FieldStatistics getFieldStatistics() { - return fieldStatistics; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if ((obj instanceof TermVector) == false) return false; - TermVector other = (TermVector) obj; - return fieldName.equals(other.fieldName) - && Objects.equals(fieldStatistics, other.fieldStatistics) - && Objects.equals(terms, other.terms); - } - - @Override - public int hashCode() { - return Objects.hash(fieldName, fieldStatistics, terms); - } - - // Class containing a general field statistics for the field - public static final class FieldStatistics { - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "field_statistics", - true, - args -> { return new FieldStatistics((long) args[0], (int) args[1], (long) args[2]); } - ); - - static { - PARSER.declareLong(constructorArg(), new ParseField("sum_doc_freq")); - PARSER.declareInt(constructorArg(), new ParseField("doc_count")); - PARSER.declareLong(constructorArg(), new ParseField("sum_ttf")); - } - private final long sumDocFreq; - private final int docCount; - private final long sumTotalTermFreq; - - public FieldStatistics(long sumDocFreq, int docCount, long sumTotalTermFreq) { - this.sumDocFreq = sumDocFreq; - this.docCount = docCount; - this.sumTotalTermFreq = sumTotalTermFreq; - } 
- - public static FieldStatistics fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - - /* - * Returns how many documents this field contains - */ - public int getDocCount() { - return docCount; - } - - /** - * Returns the sum of document frequencies for all terms in this field - */ - public long getSumDocFreq() { - return sumDocFreq; - } - - /** - * Returns the sum of total term frequencies of all terms in this field - */ - public long getSumTotalTermFreq() { - return sumTotalTermFreq; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if ((obj instanceof FieldStatistics) == false) return false; - FieldStatistics other = (FieldStatistics) obj; - return docCount == other.docCount && sumDocFreq == other.sumDocFreq && sumTotalTermFreq == other.sumTotalTermFreq; - } - - @Override - public int hashCode() { - return Objects.hash(docCount, sumDocFreq, sumTotalTermFreq); - } - } - - public static final class Term { - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "token", - true, - (args, ctxTerm) -> { - // as the response comes from server, we are sure that args[4] will be a list of Token - @SuppressWarnings("unchecked") - List tokens = (List) args[4]; - if (tokens != null) { - Collections.sort( - tokens, - Comparator.comparing(Token::getPosition, Comparator.nullsFirst(Integer::compareTo)) - .thenComparing(Token::getStartOffset, Comparator.nullsFirst(Integer::compareTo)) - .thenComparing(Token::getEndOffset, Comparator.nullsFirst(Integer::compareTo)) - ); - } - return new Term(ctxTerm, (int) args[0], (Integer) args[1], (Long) args[2], (Float) args[3], tokens); - } - ); - static { - PARSER.declareInt(constructorArg(), new ParseField("term_freq")); - PARSER.declareInt(optionalConstructorArg(), new ParseField("doc_freq")); - PARSER.declareLong(optionalConstructorArg(), new ParseField("ttf")); - PARSER.declareFloat(optionalConstructorArg(), new ParseField("score")); - PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> Token.fromXContent(p), new ParseField("tokens")); - } - - private final String term; - private final int termFreq; - @Nullable - private final Integer docFreq; - @Nullable - private final Long totalTermFreq; - @Nullable - private final Float score; - @Nullable - private final List tokens; - - public Term(String term, int termFreq, Integer docFreq, Long totalTermFreq, Float score, List tokens) { - this.term = term; - this.termFreq = termFreq; - this.docFreq = docFreq; - this.totalTermFreq = totalTermFreq; - this.score = score; - this.tokens = tokens; - } - - public static Term fromXContent(XContentParser parser, String term) { - return PARSER.apply(parser, term); - } - - /** - * Returns the string representation of the term - */ - public String getTerm() { - return term; - } - - /** - * Returns term frequency - the number of times this term occurs in the current document - */ - public int getTermFreq() { - return termFreq; - } - - /** - * Returns document frequency - the number of documents in the index that contain this term - */ - public Integer getDocFreq() { - return docFreq; - } - - /** - * Returns total term frequency - the number of times this term occurs across all documents - */ - public Long getTotalTermFreq() { - return totalTermFreq; - } - - /** - * Returns tf-idf score, if the request used some form of terms filtering - */ - public Float getScore() { - return score; - } - - /** - * Returns a list of tokens for the term - */ - public List getTokens() { - 
return tokens; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if ((obj instanceof Term) == false) return false; - Term other = (Term) obj; - return term.equals(other.term) - && termFreq == other.termFreq - && Objects.equals(docFreq, other.docFreq) - && Objects.equals(totalTermFreq, other.totalTermFreq) - && Objects.equals(score, other.score) - && Objects.equals(tokens, other.tokens); - } - - @Override - public int hashCode() { - return Objects.hash(term, termFreq, docFreq, totalTermFreq, score, tokens); - } - } - - public static final class Token { - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "token", - true, - args -> { return new Token((Integer) args[0], (Integer) args[1], (Integer) args[2], (String) args[3]); } - ); - static { - PARSER.declareInt(optionalConstructorArg(), new ParseField("start_offset")); - PARSER.declareInt(optionalConstructorArg(), new ParseField("end_offset")); - PARSER.declareInt(optionalConstructorArg(), new ParseField("position")); - PARSER.declareString(optionalConstructorArg(), new ParseField("payload")); - } - - @Nullable - private final Integer startOffset; - @Nullable - private final Integer endOffset; - @Nullable - private final Integer position; - @Nullable - private final String payload; - - public Token(Integer startOffset, Integer endOffset, Integer position, String payload) { - this.startOffset = startOffset; - this.endOffset = endOffset; - this.position = position; - this.payload = payload; - } - - public static Token fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - - /** - * Returns the start offset of the token in the document's field - */ - public Integer getStartOffset() { - return startOffset; - } - - /** - * Returns the end offset of the token in the document's field - */ - public Integer getEndOffset() { - return endOffset; - } - - /** - * Returns the position of the token in the document's field - */ - public Integer getPosition() { - return position; - } - - /** - * Returns the payload of the token or null if the payload doesn't exist - */ - public String getPayload() { - return payload; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if ((obj instanceof Token) == false) return false; - Token other = (Token) obj; - return Objects.equals(startOffset, other.startOffset) - && Objects.equals(endOffset, other.endOffset) - && Objects.equals(position, other.position) - && Objects.equals(payload, other.payload); - } - - @Override - public int hashCode() { - return Objects.hash(startOffset, endOffset, position, payload); - } - } - } -} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/license/LicenseStatus.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/license/LicenseStatus.java deleted file mode 100644 index 3038b702c84e..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/license/LicenseStatus.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.client.license; - -/** - * Status of an X-Pack license. 
- */ -public enum LicenseStatus { - - ACTIVE("active"), - INVALID("invalid"), - EXPIRED("expired"); - - private final String label; - - LicenseStatus(String label) { - this.label = label; - } - - public String label() { - return label; - } - - public static LicenseStatus fromString(String value) { - return switch (value) { - case "active" -> ACTIVE; - case "invalid" -> INVALID; - case "expired" -> EXPIRED; - default -> throw new IllegalArgumentException("unknown license status [" + value + "]"); - }; - } -} diff --git a/distribution/archives/integ-test-zip/build.gradle b/distribution/archives/integ-test-zip/build.gradle index e57c6cf32141..d91d919619ff 100644 --- a/distribution/archives/integ-test-zip/build.gradle +++ b/distribution/archives/integ-test-zip/build.gradle @@ -8,7 +8,7 @@ import org.apache.tools.ant.filters.ReplaceTokens -apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.legacy-java-rest-test' // The integ-test-distribution is published to maven apply plugin: 'elasticsearch.publish' diff --git a/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java b/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java index bdb0a76cf970..19afb4932ff2 100644 --- a/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java +++ b/distribution/archives/integ-test-zip/src/javaRestTest/java/org/elasticsearch/test/rest/WaitForRefreshAndCloseIT.java @@ -10,6 +10,7 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -117,11 +118,7 @@ private ActionFuture start(Request request) { client().performRequestAsync(request, new ResponseListener() { @Override public void onSuccess(Response response) { - try { - future.onResponse(EntityUtils.toString(response.getEntity())); - } catch (IOException e) { - future.onFailure(e); - } + ActionListener.completeWith(future, () -> EntityUtils.toString(response.getEntity())); } @Override diff --git a/distribution/build.gradle b/distribution/build.gradle index 6b8cdb128042..08920ed173d7 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -116,6 +116,14 @@ def processIntegTestOutputsTaskProvider = tasks.register("processIntegTestOutput into integTestOutputs } +def integTestConfigFiles = fileTree("${integTestOutputs}/config") { + builtBy processIntegTestOutputsTaskProvider +} + +def integTestBinFiles = fileTree("${integTestOutputs}/bin") { + builtBy processIntegTestOutputsTaskProvider +} + def defaultModulesFiles = fileTree("${defaultOutputs}/modules") { builtBy processDefaultOutputsTaskProvider } @@ -358,7 +366,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { filter("tokens" : expansionsForDistribution(distributionType, isTestDistro), ReplaceTokens.class) } from buildDefaultLog4jConfigTaskProvider - from defaultConfigFiles + from isTestDistro ? integTestConfigFiles : defaultConfigFiles } } @@ -388,7 +396,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { // module provided bin files with copySpec { eachFile { it.setMode(0755) } - from(defaultBinFiles) + from(testDistro ? 
integTestBinFiles : defaultBinFiles) if (distributionType != 'zip') { exclude '*.bat' } diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index 8827e5e4b808..11b0f99c5026 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -15,10 +15,10 @@ import org.elasticsearch.gradle.internal.info.BuildParams BuildParams.getBwcVersions().forPreviousUnreleased { unreleasedVersion -> project(unreleasedVersion.gradleProjectPath) { Version currentVersion = Version.fromString(version) - TaskProvider resolveAllBwcDepsTaskProvider = bwcSetup.bwcTask("resolveAllBwcDependencies") { + TaskProvider resolveAllBwcDepsTaskProvider = bwcSetup.bwcTask("resolveAllBwcDependencies", { t -> t.args("resolveAllDependencies", "-Dorg.gradle.warning.mode=none") - } - if (currentVersion.getMinor() == 0 && currentVersion.getRevision() == 0) { + }, false) + if (Boolean.getBoolean("recurse.bwc")) { // We only want to resolve dependencies for live versions of main, without cascading this to older versions tasks.named("resolveAllDependencies").configure { dependsOn(resolveAllBwcDepsTaskProvider) diff --git a/distribution/docker/README.md b/distribution/docker/README.md index 2e22fe099f4f..4c8052cfc26b 100644 --- a/distribution/docker/README.md +++ b/distribution/docker/README.md @@ -92,6 +92,33 @@ images, and combining them with a Docker manifest. The Elasticsearch Delivery team aren't responsible for this - rather, it happens during our unified release process. +To build multi-architecture images on `x86_64` hosts using Docker[^1], you'll +need [buildx](https://docs.docker.com/build/buildx/install/) and ensure that it +supports both `linux/amd64` **and** `linux/arm64` targets. + +You can verify the supported targets using `docker buildx ls`. For example, the +following output indicates that support for `linux/arm64` is missing: + +```shell +$ docker buildx ls +NAME/NODE DRIVER/ENDPOINT STATUS BUILDKIT PLATFORMS +default * docker + default default running 20.10.21 linux/amd64, linux/386 +``` + +On Linux `x86_64` hosts, to enable `linux-arm64` you need to install +[qemu-user-static-binfmt](https://github.com/multiarch/qemu-user-static). +Installation details depend on the Linux distribution but, as described in the +[getting started docs](https://github.com/multiarch/qemu-user-static#getting-started), +running `docker run --rm --privileged multiarch/qemu-user-static --reset -p yes` +will add the necessary support (but will not persist across reboots): + +```shell +$ docker buildx ls +NAME/NODE DRIVER/ENDPOINT STATUS BUILDKIT PLATFORMS +default * docker + default default running 20.10.21 linux/amd64, linux/arm64, linux/riscv64, linux/ppc64le, linux/s390x, linux/386, linux/arm/v7, linux/arm/v6 +``` ## Testing @@ -130,3 +157,5 @@ Ideally this import / export stuff should be completely removed. [DockerTests]: ../../qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java [multi-arch]: https://www.docker.com/blog/multi-arch-build-and-images-the-simple-way/ [ubi]: https://developers.redhat.com/products/rhel/ubi + +[^1]: `podman/buildah` also [supports building multi-platform images](https://github.com/containers/buildah/issues/1590). 
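For reference, once both targets appear in `docker buildx ls`, a multi-arch build can be driven directly with `buildx`. The snippet below is only an illustrative sketch: the builder name, image tag and build context are placeholders, and the Elasticsearch images themselves are produced by the Gradle tasks described in this README rather than by invoking `buildx` by hand.

```shell
# Create and select a builder backed by the docker-container driver,
# which is able to assemble multi-platform images.
docker buildx create --name multiarch --driver docker-container --use

# Build a single image for both architectures in one invocation.
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  -t example.registry/elasticsearch-test:latest \
  .
```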
diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index fe4a1e8b48a7..52b8d7b574d4 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -8,14 +8,13 @@ import org.elasticsearch.gradle.internal.docker.DockerSupportPlugin import org.elasticsearch.gradle.internal.docker.DockerSupportService import org.elasticsearch.gradle.internal.docker.ShellRetry import org.elasticsearch.gradle.internal.docker.TransformLog4jConfigFilter -import org.elasticsearch.gradle.internal.dra.DraResolvePlugin import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.util.GradleUtils import java.nio.file.Path import java.time.temporal.ChronoUnit -apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.test.fixtures' apply plugin: 'elasticsearch.internal-distribution-download' apply plugin: 'elasticsearch.dra-artifacts' @@ -75,11 +74,12 @@ configurations { log4jConfig tini allPlugins - filebeat - metricbeat + filebeat_aarch64 + filebeat_x86_64 + metricbeat_aarch64 + metricbeat_x86_64 } -String beatsArch = Architecture.current() == Architecture.AARCH64 ? 'arm64' : 'x86_64' String tiniArch = Architecture.current() == Architecture.AARCH64 ? 'arm64' : 'amd64' dependencies { @@ -88,8 +88,10 @@ dependencies { log4jConfig project(path: ":distribution", configuration: 'log4jConfig') tini "krallin:tini:0.19.0:${tiniArch}" allPlugins project(path: ':plugins', configuration: 'allPlugins') - filebeat "beats:filebeat:${VersionProperties.elasticsearch}:linux-${beatsArch}@tar.gz" - metricbeat "beats:metricbeat:${VersionProperties.elasticsearch}:linux-${beatsArch}@tar.gz" + filebeat_aarch64 "beats:filebeat:${VersionProperties.elasticsearch}:linux-arm64@tar.gz" + filebeat_x86_64 "beats:filebeat:${VersionProperties.elasticsearch}:linux-x86_64@tar.gz" + metricbeat_aarch64 "beats:metricbeat:${VersionProperties.elasticsearch}:linux-arm64@tar.gz" + metricbeat_x86_64 "beats:metricbeat:${VersionProperties.elasticsearch}:linux-x86_64@tar.gz" } ext.expansions = { Architecture architecture, DockerBase base -> @@ -273,8 +275,8 @@ void addBuildDockerContextTask(Architecture architecture, DockerBase base) { boolean includeBeats = VersionProperties.isElasticsearchSnapshot() == true || buildId != null || useDra if (includeBeats) { - from configurations.filebeat - from configurations.metricbeat + from configurations.getByName("filebeat_${architecture.classifier}") + from configurations.getByName("metricbeat_${architecture.classifier}") } // For some reason, the artifact name can differ depending on what repository we used. 
rename ~/((?:file|metric)beat)-.*\.tar\.gz$/, "\$1-${VersionProperties.elasticsearch}.tar.gz" @@ -548,5 +550,5 @@ subprojects { Project subProject -> tasks.named('resolveAllDependencies') { // Don't try and resolve filebeat or metricbeat snapshots as they may not always be available - configs = configurations.matching { it.name.endsWith('beat') == false } + configs = configurations.matching { it.name.contains('beat') == false } } diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index 58d83410cd6d..c999c666ade3 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -72,4 +72,4 @@ @error.file@ ## GC logging --Xlog:gc*,gc+age=trace,safepoint:file=@loggc@:utctime,pid,tags:filecount=32,filesize=64m +-Xlog:gc*,gc+age=trace,safepoint:file=@loggc@:utctime,level,pid,tags:filecount=32,filesize=64m diff --git a/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/cli/keystore/KeyStoreWrapperTests.java b/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/cli/keystore/KeyStoreWrapperTests.java index 02f6b6b3e7ab..f6e357881168 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/cli/keystore/KeyStoreWrapperTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/elasticsearch/cli/keystore/KeyStoreWrapperTests.java @@ -14,6 +14,7 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.core.IOUtils; @@ -51,8 +52,10 @@ import javax.crypto.spec.SecretKeySpec; import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; @@ -454,6 +457,48 @@ public void testLegacyV3() throws GeneralSecurityException, IOException { assertThat(toByteArray(wrapper.getFile("file_setting")), equalTo("file_value".getBytes(StandardCharsets.UTF_8))); } + public void testSerializationNewlyCreated() throws Exception { + final KeyStoreWrapper wrapper = KeyStoreWrapper.create(); + wrapper.setString("string_setting", "string_value".toCharArray()); + + // testing when dataBytes[] is null + final BytesStreamOutput out = new BytesStreamOutput(); + wrapper.writeTo(out); + final KeyStoreWrapper fromStream = new KeyStoreWrapper(out.bytes().streamInput()); + + assertThat(fromStream.getSettingNames(), hasSize(2)); + assertThat(fromStream.getSettingNames(), containsInAnyOrder("string_setting", "keystore.seed")); + + assertEquals(wrapper.getString("string_setting"), fromStream.getString("string_setting")); + assertFalse(wrapper.hasPassword()); + } + + public void testSerializationWhenLoadedFromFile() throws Exception { + final KeyStoreWrapper wrapper = KeyStoreWrapper.create(); + wrapper.setString("string_setting", "string_value".toCharArray()); + + // testing with password and raw dataBytes[] + final char[] password = getPossibleKeystorePassword(); + wrapper.save(env.configFile(), password); + final KeyStoreWrapper fromFile = KeyStoreWrapper.load(env.configFile()); + fromFile.decrypt(password); + + assertThat(fromFile.getSettingNames(), hasSize(2)); + 
assertThat(fromFile.getSettingNames(), containsInAnyOrder("string_setting", "keystore.seed")); + + assertEquals(wrapper.getString("string_setting"), fromFile.getString("string_setting")); + + final BytesStreamOutput secondOut = new BytesStreamOutput(); + fromFile.writeTo(secondOut); + final KeyStoreWrapper fromStreamSecond = new KeyStoreWrapper(secondOut.bytes().streamInput()); + + assertThat(fromStreamSecond.getSettingNames(), hasSize(2)); + assertThat(fromStreamSecond.getSettingNames(), containsInAnyOrder("string_setting", "keystore.seed")); + + assertEquals(wrapper.getString("string_setting"), fromStreamSecond.getString("string_setting")); + assertEquals(fromFile.hasPassword(), fromStreamSecond.hasPassword()); + } + private byte[] toByteArray(final InputStream is) throws IOException { final ByteArrayOutputStream os = new ByteArrayOutputStream(); final byte[] buffer = new byte[1024]; diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index d1a53a0e11bd..8ea5b2f963b7 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -10,9 +10,18 @@ apply plugin: 'elasticsearch.build' archivesBaseName = 'elasticsearch-plugin-cli' +tasks.named("dependencyLicenses").configure { + mapping from: /asm-.*/, to: 'asm' +} + dependencies { compileOnly project(":server") compileOnly project(":libs:elasticsearch-cli") + implementation project(":libs:elasticsearch-plugin-api") + implementation project(":libs:elasticsearch-plugin-scanner") + implementation 'org.ow2.asm:asm:9.4' + implementation 'org.ow2.asm:asm-tree:9.4' + api "org.bouncycastle:bcpg-fips:1.0.4" api "org.bouncycastle:bc-fips:1.0.2" testImplementation project(":test:framework") diff --git a/distribution/tools/plugin-cli/licenses/asm-LICENSE.txt b/distribution/tools/plugin-cli/licenses/asm-LICENSE.txt new file mode 100644 index 000000000000..afb064f2f266 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/asm-LICENSE.txt @@ -0,0 +1,26 @@ +Copyright (c) 2012 France Télécom +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/distribution/tools/plugin-cli/licenses/asm-NOTICE.txt b/distribution/tools/plugin-cli/licenses/asm-NOTICE.txt new file mode 100644 index 000000000000..8d1c8b69c3fc --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/asm-NOTICE.txt @@ -0,0 +1 @@ + diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java index 5f7b46e2a749..1afdcd17287f 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java @@ -38,9 +38,12 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.env.Environment; import org.elasticsearch.jdk.JarHell; +import org.elasticsearch.plugin.scanner.ClassReaders; +import org.elasticsearch.plugin.scanner.NamedComponentScanner; import org.elasticsearch.plugins.Platforms; import org.elasticsearch.plugins.PluginDescriptor; import org.elasticsearch.plugins.PluginsUtils; +import org.objectweb.asm.ClassReader; import java.io.BufferedReader; import java.io.Closeable; @@ -82,6 +85,7 @@ import java.util.Timer; import java.util.TimerTask; import java.util.stream.Collectors; +import java.util.stream.Stream; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; @@ -197,6 +201,7 @@ public class InstallPluginAction implements Closeable { private Environment env; private boolean batch; private Proxy proxy = null; + private NamedComponentScanner scanner = new NamedComponentScanner(); public InstallPluginAction(Terminal terminal, Environment env, boolean batch) { this.terminal = terminal; @@ -208,7 +213,6 @@ public void setProxy(Proxy proxy) { this.proxy = proxy; } - // pkg private for testing public void execute(List plugins) throws Exception { if (plugins.isEmpty()) { throw new UserException(ExitCodes.USAGE, "at least one plugin id is required"); @@ -867,9 +871,24 @@ private PluginDescriptor loadPluginInfo(Path pluginRoot) throws Exception { // check for jar hell before any copying jarHellCheck(info, pluginRoot, env.pluginsFile(), env.modulesFile()); + if (info.isStable() && hasNamedComponentFile(pluginRoot) == false) { + generateNameComponentFile(pluginRoot); + } return info; } + private void generateNameComponentFile(Path pluginRoot) throws IOException { + Stream classPath = ClassReaders.ofClassPath().stream(); // contains plugin-api + List classReaders = Stream.concat(ClassReaders.ofDirWithJars(pluginRoot).stream(), classPath).toList(); + Map> namedComponentsMap = scanner.scanForNamedClasses(classReaders); + Path outputFile = pluginRoot.resolve(PluginDescriptor.NAMED_COMPONENTS_FILENAME); + scanner.writeToFile(namedComponentsMap, outputFile); + } + + private boolean hasNamedComponentFile(Path pluginRoot) { + return Files.exists(pluginRoot.resolve(PluginDescriptor.NAMED_COMPONENTS_FILENAME)); + } + private static final String LIB_TOOLS_PLUGIN_CLI_CLASSPATH_JAR; static { diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java index 9341c469462b..45328397f6ae 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java 
@@ -43,15 +43,19 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.PathUtilsForTesting; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.Tuple; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.plugin.scanner.NamedComponentScanner; import org.elasticsearch.plugins.Platforms; import org.elasticsearch.plugins.PluginDescriptor; import org.elasticsearch.plugins.PluginTestUtil; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.PosixPermissionsResetter; +import org.elasticsearch.test.compiler.InMemoryJavaCompiler; +import org.elasticsearch.test.jar.JarUtils; import org.junit.After; import org.junit.Before; @@ -85,6 +89,7 @@ import java.util.Arrays; import java.util.Date; import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; @@ -123,6 +128,7 @@ public class InstallPluginActionTests extends ESTestCase { private MockTerminal terminal; private Tuple env; private Path pluginDir; + private NamedComponentScanner namedComponentScanner; private final boolean isPosix; private final boolean isReal; @@ -130,6 +136,8 @@ public class InstallPluginActionTests extends ESTestCase { @SuppressForbidden(reason = "sets java.io.tmpdir") public InstallPluginActionTests(FileSystem fs, Function temp) { + assert "false".equals(System.getProperty("tests.security.manager")) : "-Dtests.security.manager=false has to be set"; + this.temp = temp; this.isPosix = fs.supportedFileAttributeViews().contains("posix"); this.isReal = fs == PathUtils.getDefaultFileSystem(); @@ -151,6 +159,7 @@ void jarHellCheck(PluginDescriptor candidateInfo, Path candidate, Path pluginsDi // no jarhell check } }; + defaultAction = new InstallPluginAction(terminal, env.v2(), false); } @@ -198,7 +207,9 @@ private static Configuration toPosix(Configuration configuration) { return configuration.toBuilder().setAttributeViews("basic", "owner", "posix", "unix").build(); } - /** Creates a test environment with bin, config and plugins directories. */ + /** + * Creates a test environment with bin, config and plugins directories. + */ static Tuple createEnv(Function temp) throws IOException { Path home = temp.apply("install-plugin-command-tests"); Files.createDirectories(home.resolve("bin")); @@ -215,7 +226,9 @@ static Path createPluginDir(Function temp) { return temp.apply("pluginDir"); } - /** creates a fake jar file with empty class files */ + /** + * creates a fake jar file with empty class files + */ static void writeJar(Path jar, String... classes) throws IOException { try (ZipOutputStream stream = new ZipOutputStream(Files.newOutputStream(jar))) { for (String clazz : classes) { @@ -236,13 +249,47 @@ static Path writeZip(Path structure, String prefix) throws IOException { return zip; } - /** creates a plugin .zip and returns the url for testing */ + /** + * creates a plugin .zip and returns the url for testing + */ static InstallablePlugin createPluginZip(String name, Path structure, String... additionalProps) throws IOException { return createPlugin(name, structure, additionalProps); } + static void writeStablePlugin(String name, Path structure, boolean hasNamedComponentFile, String... 
additionalProps) + throws IOException { + String[] properties = pluginProperties(name, additionalProps, true); + PluginTestUtil.writeStablePluginProperties(structure, properties); + + if (hasNamedComponentFile) { + PluginTestUtil.writeNamedComponentsFile(structure, namedComponentsJSON()); + } + Path jar = structure.resolve("plugin.jar"); + + JarUtils.createJarWithEntries(jar, Map.of("p/A.class", InMemoryJavaCompiler.compile("p.A", """ + package p; + import org.elasticsearch.plugin.*; + import org.elasticsearch.plugins.cli.test_model.*; + @NamedComponent("a_component") + public class A implements ExtensibleInterface{} + """), "p/B.class", InMemoryJavaCompiler.compile("p.B", """ + package p; + import org.elasticsearch.plugin.*; + import org.elasticsearch.plugins.cli.test_model.*; + @NamedComponent("b_component") + public class B implements ExtensibleInterface{} + """))); + } + static void writePlugin(String name, Path structure, String... additionalProps) throws IOException { - String[] properties = Stream.concat( + String[] properties = pluginProperties(name, additionalProps, false); + PluginTestUtil.writePluginProperties(structure, properties); + String className = name.substring(0, 1).toUpperCase(Locale.ENGLISH) + name.substring(1) + "Plugin"; + writeJar(structure.resolve("plugin.jar"), className); + } + + private static String[] pluginProperties(String name, String[] additionalProps, boolean isStable) { + return Stream.of( Stream.of( "description", "fake desc", @@ -253,15 +300,12 @@ static void writePlugin(String name, Path structure, String... additionalProps) "elasticsearch.version", Version.CURRENT.toString(), "java.version", - System.getProperty("java.specification.version"), - "classname", - "FakePlugin" + System.getProperty("java.specification.version") + ), + isStable ? Stream.empty() : Stream.of("classname", "FakePlugin"), Arrays.stream(additionalProps) - ).toArray(String[]::new); - PluginTestUtil.writePluginProperties(structure, properties); - String className = name.substring(0, 1).toUpperCase(Locale.ENGLISH) + name.substring(1) + "Plugin"; - writeJar(structure.resolve("plugin.jar"), className); + ).flatMap(Function.identity()).toArray(String[]::new); } static void writePluginSecurityPolicy(Path pluginDir, String... permissions) throws IOException { @@ -275,6 +319,12 @@ static void writePluginSecurityPolicy(Path pluginDir, String... permissions) thr Files.write(pluginDir.resolve("plugin-security.policy"), securityPolicyContent.toString().getBytes(StandardCharsets.UTF_8)); } + static InstallablePlugin createStablePlugin(String name, Path structure, boolean hasNamedComponentFile, String... additionalProps) + throws IOException { + writeStablePlugin(name, structure, hasNamedComponentFile, additionalProps); + return new InstallablePlugin(name, writeZip(structure, null).toUri().toURL().toString()); + } + static InstallablePlugin createPlugin(String name, Path structure, String... 
additionalProps) throws IOException { writePlugin(name, structure, additionalProps); return new InstallablePlugin(name, writeZip(structure, null).toUri().toURL().toString()); @@ -309,6 +359,11 @@ void assertPlugin(String name, Path original, Environment environment) throws IO assertInstallCleaned(environment); } + void assertNamedComponentFile(String name, Path pluginDir, String expectedContent) throws IOException { + Path namedComponents = pluginDir.resolve(name).resolve(PluginDescriptor.NAMED_COMPONENTS_FILENAME); + assertThat(Files.readString(namedComponents), equalTo(expectedContent)); + } + void assertPluginInternal(String name, Path pluginsFile, Path originalPlugin) throws IOException { Path got = pluginsFile.resolve(name); assertTrue("dir " + name + " exists", Files.exists(got)); @@ -505,7 +560,7 @@ public void testInstallFailsIfPreviouslyRemovedPluginFailed() throws Exception { final Path removing = env.v2().pluginsFile().resolve(".removing-failed"); Files.createDirectory(removing); final IllegalStateException e = expectThrows(IllegalStateException.class, () -> installPlugin(pluginZip)); - final String expected = formatted( + final String expected = Strings.format( "found file [%s] from a failed attempt to remove the plugin [failed]; execute [elasticsearch-plugin remove failed]", removing ); @@ -1506,4 +1561,42 @@ public void testInstallMigratedPlugins() throws Exception { assertThat(terminal.getErrorOutput(), containsString("[" + id + "] is no longer a plugin")); } } + + public void testStablePluginWithNamedComponentsFile() throws Exception { + InstallablePlugin stablePluginZip = createStablePlugin("stable1", pluginDir, true); + installPlugins(List.of(stablePluginZip), env.v1()); + assertPlugin("stable1", pluginDir, env.v2()); + assertNamedComponentFile("stable1", env.v2().pluginsFile(), namedComponentsJSON()); + } + + @SuppressWarnings("unchecked") + public void testStablePluginWithoutNamedComponentsFile() throws Exception { + // named component will have to be generated upon install + InstallablePlugin stablePluginZip = createStablePlugin("stable1", pluginDir, false); + + installPlugins(List.of(stablePluginZip), env.v1()); + + assertPlugin("stable1", pluginDir, env.v2()); + assertNamedComponentFile("stable1", env.v2().pluginsFile(), namedComponentsJSON()); + } + + private Map> namedComponentsMap() { + Map> result = new LinkedHashMap<>(); + Map extensibles = new LinkedHashMap<>(); + extensibles.put("a_component", "p.A"); + extensibles.put("b_component", "p.B"); + result.put("org.elasticsearch.plugins.cli.test_model.ExtensibleInterface", extensibles); + return result; + } + + private static String namedComponentsJSON() { + return """ + { + "org.elasticsearch.plugins.cli.test_model.ExtensibleInterface": { + "a_component": "p.A", + "b_component": "p.B" + } + } + """.replaceAll("[\n\r\s]", ""); + } } diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/test_model/ExtensibleInterface.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/test_model/ExtensibleInterface.java new file mode 100644 index 000000000000..517c57a03680 --- /dev/null +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/test_model/ExtensibleInterface.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.plugins.cli.test_model; + +import org.elasticsearch.plugin.Extensible; + +@Extensible +public interface ExtensibleInterface {} diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java index b89eb166aad8..c13ab9d10681 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/APMJvmOptions.java @@ -13,7 +13,7 @@ import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; @@ -133,11 +133,10 @@ class APMJvmOptions { * because it will be deleted once Elasticsearch starts. * * @param settings the Elasticsearch settings to consider - * @param keystore a wrapper to access the keystore, or null if there is no keystore + * @param secrets a wrapper to access the secrets, or null if there is no secrets * @param tmpdir Elasticsearch's temporary directory, where the config file will be written */ - static List apmJvmOptions(Settings settings, @Nullable KeyStoreWrapper keystore, Path tmpdir) throws UserException, - IOException { + static List apmJvmOptions(Settings settings, @Nullable SecureSettings secrets, Path tmpdir) throws UserException, IOException { final Path agentJar = findAgentJar(); if (agentJar == null) { @@ -158,8 +157,8 @@ static List apmJvmOptions(Settings settings, @Nullable KeyStoreWrapper k } } - if (keystore != null) { - extractSecureSettings(keystore, propertiesMap); + if (secrets != null) { + extractSecureSettings(secrets, propertiesMap); } final Map dynamicSettings = extractDynamicSettings(propertiesMap); @@ -180,11 +179,11 @@ static String agentCommandLineOption(Path agentJar, Path tmpPropertiesFile) { return "-javaagent:" + agentJar + "=c=" + tmpPropertiesFile; } - private static void extractSecureSettings(KeyStoreWrapper keystore, Map propertiesMap) { - final Set settingNames = keystore.getSettingNames(); + private static void extractSecureSettings(SecureSettings secrets, Map propertiesMap) { + final Set settingNames = secrets.getSettingNames(); for (String key : List.of("api_key", "secret_token")) { if (settingNames.contains("tracing.apm." + key)) { - try (SecureString token = keystore.getString("tracing.apm." + key)) { + try (SecureString token = secrets.getString("tracing.apm." 
+ key)) { propertiesMap.put(key, token.toString()); } } diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ErrorPumpThread.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ErrorPumpThread.java index d8f0c4471c65..397e6a327d1a 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ErrorPumpThread.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ErrorPumpThread.java @@ -19,19 +19,14 @@ import java.util.concurrent.CountDownLatch; import static org.elasticsearch.bootstrap.BootstrapInfo.SERVER_READY_MARKER; -import static org.elasticsearch.bootstrap.BootstrapInfo.USER_EXCEPTION_MARKER; import static org.elasticsearch.server.cli.ProcessUtil.nonInterruptibleVoid; /** * A thread which reads stderr of the jvm process and writes it to this process' stderr. * - *

<p> Two special state markers are watched for. These are ascii control characters which signal - * to the cli process something has changed in the server process. The two possible special messages are: - * <ul> - *     <li>{@link BootstrapInfo#USER_EXCEPTION_MARKER} - signals a bootstrap error has occurred, and is followed - *     by the error message</li> - *     <li>{@link BootstrapInfo#SERVER_READY_MARKER} - signals the server is ready so the cli may detach if daemonizing</li> - * </ul> + * <p>
The thread watches for a special state marker from the process. The ascii character + * {@link BootstrapInfo#SERVER_READY_MARKER} signals the server is ready and the cli may + * detach if daemonizing. All other messages are passed through to stderr. */ class ErrorPumpThread extends Thread { private final BufferedReader reader; @@ -43,9 +38,6 @@ class ErrorPumpThread extends Thread { // a flag denoting whether the ready marker has been received by the server process private volatile boolean ready; - // an exception message received alongside the user exception marker, if a bootstrap error has occurred - private volatile String userExceptionMsg; - // an unexpected io failure that occurred while pumping stderr private volatile IOException ioFailure; @@ -66,10 +58,6 @@ String waitUntilReady() throws IOException { if (ioFailure != null) { throw ioFailure; } - if (ready == false) { - return userExceptionMsg; - } - assert userExceptionMsg == null; return null; } @@ -85,10 +73,7 @@ public void run() { try { String line; while ((line = reader.readLine()) != null) { - if (line.isEmpty() == false && line.charAt(0) == USER_EXCEPTION_MARKER) { - userExceptionMsg = line.substring(1); - readyOrDead.countDown(); - } else if (line.isEmpty() == false && line.charAt(0) == SERVER_READY_MARKER) { + if (line.isEmpty() == false && line.charAt(0) == SERVER_READY_MARKER) { ready = true; readyOrDead.countDown(); } else { diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmErgonomics.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmErgonomics.java index 46e3da3ced90..926d5727a1b4 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmErgonomics.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmErgonomics.java @@ -64,28 +64,42 @@ static boolean tuneG1GCForSmallHeap(final long heapSize) { } static boolean tuneG1GCHeapRegion(final Map finalJvmOptions, final boolean tuneG1GCForSmallHeap) { - JvmOption g1GCHeapRegion = finalJvmOptions.get("G1HeapRegionSize"); - JvmOption g1GC = finalJvmOptions.get("UseG1GC"); - return (tuneG1GCForSmallHeap && g1GC.getMandatoryValue().equals("true") && g1GCHeapRegion.isCommandLineOrigin() == false); + return tuneG1GCForSmallHeap && usingG1GcWithoutCommandLineOriginOption(finalJvmOptions, "G1HeapRegionSize"); } static int tuneG1GCReservePercent(final Map finalJvmOptions, final boolean tuneG1GCForSmallHeap) { - JvmOption g1GC = finalJvmOptions.get("UseG1GC"); - JvmOption g1GCReservePercent = finalJvmOptions.get("G1ReservePercent"); - if (g1GC.getMandatoryValue().equals("true")) { - if (g1GCReservePercent.isCommandLineOrigin() == false && tuneG1GCForSmallHeap) { - return 15; - } else if (g1GCReservePercent.isCommandLineOrigin() == false && tuneG1GCForSmallHeap == false) { - return 25; - } + if (usingG1GcWithoutCommandLineOriginOption(finalJvmOptions, "G1ReservePercent")) { + return tuneG1GCForSmallHeap ? 15 : 25; } return 0; } static boolean tuneG1GCInitiatingHeapOccupancyPercent(final Map finalJvmOptions) { - JvmOption g1GC = finalJvmOptions.get("UseG1GC"); - JvmOption g1GCInitiatingHeapOccupancyPercent = finalJvmOptions.get("InitiatingHeapOccupancyPercent"); - return g1GCInitiatingHeapOccupancyPercent.isCommandLineOrigin() == false && g1GC.getMandatoryValue().equals("true"); + return usingG1GcWithoutCommandLineOriginOption(finalJvmOptions, "InitiatingHeapOccupancyPercent"); + } + + /** + * @return

<ul> + *     <li>{@code true} if `-XX:+UseG1GC` is in the final JVM options and {@code optionName} was not specified.</li> + *     <li>{@code false} if either `-XX:-UseG1GC` is in the final JVM options, or {@code optionName} was specified.</li> + * </ul>
+ * + * @throws IllegalStateException if neither `-XX:+UseG1GC` nor `-XX:-UseG1GC` is in the final JVM options, or `-XX:+UseG1GC` is selected + * and {@code optionName} is not in the final JVM options. + */ + private static boolean usingG1GcWithoutCommandLineOriginOption(Map finalJvmOptions, String optionName) { + return getRequiredOption(finalJvmOptions, "UseG1GC").getMandatoryValue().equals("true") + && getRequiredOption(finalJvmOptions, optionName).isCommandLineOrigin() == false; + } + + private static JvmOption getRequiredOption(final Map finalJvmOptions, final String key) { + final var jvmOption = finalJvmOptions.get(key); + if (jvmOption == null) { + throw new IllegalStateException( + "JVM option [" + key + "] was unexpectedly missing. Elasticsearch requires this option to be present." + ); + } + return jvmOption; } private static final Pattern SYSTEM_PROPERTY = Pattern.compile("^-D(?[\\w+].*?)=(?.*)$"); diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOption.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOption.java index 39bf2e54dade..60cbcb86c02b 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOption.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOption.java @@ -8,6 +8,8 @@ package org.elasticsearch.server.cli; +import org.elasticsearch.common.Strings; + import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; @@ -29,6 +31,11 @@ class JvmOption { private final String origin; JvmOption(String value, String origin) { + if (origin == null) { + throw new IllegalStateException(Strings.format(""" + Elasticsearch could not determine the origin of JVM option [%s]. \ + This indicates that it is running in an unsupported configuration.""", value)); + } this.value = value; this.origin = origin; } diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java index b20aad3a0b84..1b92300ac3dd 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java @@ -11,7 +11,6 @@ import org.elasticsearch.bootstrap.ServerArgs; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.UserException; -import org.elasticsearch.common.settings.KeyStoreWrapper; import java.io.BufferedReader; import java.io.IOException; @@ -70,7 +69,7 @@ SortedMap invalidLines() { * files in the {@code jvm.options.d} directory, and the options given by the {@code ES_JAVA_OPTS} environment * variable. 
* - * @param keystore the installation's keystore + * @param args the start-up arguments * @param configDir the ES config dir * @param tmpDir the directory that should be passed to {@code -Djava.io.tmpdir} * @param envOptions the options passed through the ES_JAVA_OPTS env var @@ -79,8 +78,8 @@ SortedMap invalidLines() { * @throws IOException if there is a problem reading any of the files * @throws UserException if there is a problem parsing the `jvm.options` file or `jvm.options.d` files */ - static List determineJvmOptions(ServerArgs args, KeyStoreWrapper keystore, Path configDir, Path tmpDir, String envOptions) - throws InterruptedException, IOException, UserException { + static List determineJvmOptions(ServerArgs args, Path configDir, Path tmpDir, String envOptions) throws InterruptedException, + IOException, UserException { final JvmOptionsParser parser = new JvmOptionsParser(); @@ -89,7 +88,7 @@ static List determineJvmOptions(ServerArgs args, KeyStoreWrapper keystor substitutions.put("ES_PATH_CONF", configDir.toString()); try { - return parser.jvmOptions(args, keystore, configDir, tmpDir, envOptions, substitutions); + return parser.jvmOptions(args, configDir, tmpDir, envOptions, substitutions); } catch (final JvmOptionsFileParserException e) { final String errorMessage = String.format( Locale.ROOT, @@ -120,7 +119,6 @@ static List determineJvmOptions(ServerArgs args, KeyStoreWrapper keystor private List jvmOptions( ServerArgs args, - KeyStoreWrapper keystore, final Path config, Path tmpDir, final String esJavaOpts, @@ -141,7 +139,7 @@ private List jvmOptions( final List ergonomicJvmOptions = JvmErgonomics.choose(substitutedJvmOptions); final List systemJvmOptions = SystemJvmOptions.systemJvmOptions(); - final List apmOptions = APMJvmOptions.apmJvmOptions(args.nodeSettings(), keystore, tmpDir); + final List apmOptions = APMJvmOptions.apmJvmOptions(args.nodeSettings(), args.secrets(), tmpDir); final List finalJvmOptions = new ArrayList<>( systemJvmOptions.size() + substitutedJvmOptions.size() + ergonomicJvmOptions.size() + apmOptions.size() diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/KeyStoreLoader.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/KeyStoreLoader.java new file mode 100644 index 000000000000..6741e95e98dc --- /dev/null +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/KeyStoreLoader.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.server.cli; + +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.env.Environment; + +import java.util.Optional; + +/** + * Implementation of {@link SecureSettingsLoader} for {@link KeyStoreWrapper} + */ +public class KeyStoreLoader implements SecureSettingsLoader { + @Override + public LoadedSecrets load(Environment environment, Terminal terminal) throws Exception { + // See if we have a keystore already present + KeyStoreWrapper secureSettings = KeyStoreWrapper.load(environment.configFile()); + // If there's no keystore or the keystore has no password, set an empty password + var password = (secureSettings == null || secureSettings.hasPassword() == false) + ? new SecureString(new char[0]) + : new SecureString(terminal.readSecret(KeyStoreWrapper.PROMPT)); + + return new LoadedSecrets(secureSettings, Optional.of(password)); + } + + @Override + public SecureSettings bootstrap(Environment environment, SecureString password) throws Exception { + return KeyStoreWrapper.bootstrap(environment.configFile(), () -> password); + } + + @Override + public boolean supportsSecurityAutoConfiguration() { + return true; + } +} diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SecureSettingsLoader.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SecureSettingsLoader.java new file mode 100644 index 000000000000..98fc47f4e64b --- /dev/null +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SecureSettingsLoader.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.server.cli; + +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.env.Environment; + +import java.util.Optional; + +/** + * An interface for implementing {@link SecureSettings} loaders, that is, implementations that create, initialize and load + * secrets stores. 
+ */ +public interface SecureSettingsLoader { + /** + * Loads an existing SecureSettings implementation + */ + LoadedSecrets load(Environment environment, Terminal terminal) throws Exception; + + /** + * Loads an existing SecureSettings implementation, creates one if it doesn't exist + */ + SecureSettings bootstrap(Environment environment, SecureString password) throws Exception; + + /** + * A load result for loading a SecureSettings implementation from a SecureSettingsLoader + * @param secrets the loaded secure settings + * @param password an optional password if the implementation required one + */ + record LoadedSecrets(SecureSettings secrets, Optional password) implements AutoCloseable { + @Override + public void close() throws Exception { + if (password.isPresent()) { + password.get().close(); + } + } + } + + boolean supportsSecurityAutoConfiguration(); +} diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java index 73269b8c719f..e473301ad67f 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerCli.java @@ -22,7 +22,7 @@ import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.cli.EnvironmentAwareCommand; -import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.env.Environment; import org.elasticsearch.monitor.jvm.JvmInfo; @@ -75,20 +75,29 @@ public void execute(Terminal terminal, OptionSet options, Environment env, Proce validateConfig(options, env); - try (KeyStoreWrapper keystore = KeyStoreWrapper.load(env.configFile())) { - // setup security - final SecureString keystorePassword = getKeystorePassword(keystore, terminal); - env = autoConfigureSecurity(terminal, options, processInfo, env, keystorePassword); + var secureSettingsLoader = secureSettingsLoader(env); + + try ( + var loadedSecrets = secureSettingsLoader.load(env, terminal); + var password = (loadedSecrets.password().isPresent()) ? 
loadedSecrets.password().get() : new SecureString(new char[0]); + ) { + SecureSettings secrets = loadedSecrets.secrets(); + if (secureSettingsLoader.supportsSecurityAutoConfiguration()) { + env = autoConfigureSecurity(terminal, options, processInfo, env, password); + // reload or create the secrets + secrets = secureSettingsLoader.bootstrap(env, password); + } - if (keystore != null) { - keystore.decrypt(keystorePassword.getChars()); + // we should have a loaded or bootstrapped secure settings at this point + if (secrets == null) { + throw new UserException(ExitCodes.CONFIG, "Elasticsearch secure settings not configured"); } // install/remove plugins from elasticsearch-plugins.yml syncPlugins(terminal, env, processInfo); - ServerArgs args = createArgs(options, env, keystorePassword, processInfo); - this.server = startServer(terminal, processInfo, args, keystore); + ServerArgs args = createArgs(options, env, secrets, processInfo); + this.server = startServer(terminal, processInfo, args); } if (options.has(daemonizeOption)) { @@ -127,21 +136,17 @@ private void validateConfig(OptionSet options, Environment env) throws UserExcep } } - private static SecureString getKeystorePassword(KeyStoreWrapper keystore, Terminal terminal) { - if (keystore != null && keystore.hasPassword()) { - return new SecureString(terminal.readSecret(KeyStoreWrapper.PROMPT)); - } else { - return new SecureString(new char[0]); - } - } - - private Environment autoConfigureSecurity( + // Autoconfiguration of SecureSettings is currently only supported for KeyStore based secure settings + // package private for testing + Environment autoConfigureSecurity( Terminal terminal, OptionSet options, ProcessInfo processInfo, Environment env, SecureString keystorePassword ) throws Exception { + assert secureSettingsLoader(env) instanceof KeyStoreLoader; + String autoConfigLibs = "modules/x-pack-core,modules/x-pack-security,lib/tools/security-cli"; Command cmd = loadTool("auto-configure-node", autoConfigLibs); assert cmd instanceof EnvironmentAwareCommand; @@ -182,7 +187,8 @@ private Environment autoConfigureSecurity( return env; } - private void syncPlugins(Terminal terminal, Environment env, ProcessInfo processInfo) throws Exception { + // package private for testing + void syncPlugins(Terminal terminal, Environment env, ProcessInfo processInfo) throws Exception { String pluginCliLibs = "lib/tools/plugin-cli"; Command cmd = loadTool("sync-plugins", pluginCliLibs); assert cmd instanceof EnvironmentAwareCommand; @@ -201,7 +207,7 @@ private void validatePidFile(Path pidFile) throws UserException { } } - private ServerArgs createArgs(OptionSet options, Environment env, SecureString keystorePassword, ProcessInfo processInfo) + private ServerArgs createArgs(OptionSet options, Environment env, SecureSettings secrets, ProcessInfo processInfo) throws UserException { boolean daemonize = options.has(daemonizeOption); boolean quiet = options.has(quietOption); @@ -213,7 +219,7 @@ private ServerArgs createArgs(OptionSet options, Environment env, SecureString k } validatePidFile(pidFile); } - return new ServerArgs(daemonize, quiet, pidFile, keystorePassword, env.settings(), env.configFile()); + return new ServerArgs(daemonize, quiet, pidFile, secrets, env.settings(), env.configFile()); } @Override @@ -229,8 +235,13 @@ protected Command loadTool(String toolname, String libs) { } // protected to allow tests to override - protected ServerProcess startServer(Terminal terminal, ProcessInfo processInfo, ServerArgs args, KeyStoreWrapper keystore) - 
throws UserException { - return ServerProcess.start(terminal, processInfo, args, keystore); + protected ServerProcess startServer(Terminal terminal, ProcessInfo processInfo, ServerArgs args) throws UserException { + return ServerProcess.start(terminal, processInfo, args); + } + + // protected to allow tests to override + protected SecureSettingsLoader secureSettingsLoader(Environment env) { + // TODO: Use the environment configuration to decide what kind of secrets store to load + return new KeyStoreLoader(); } } diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java index 674f9f12c916..208ba9b5fe34 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java @@ -15,7 +15,6 @@ import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; -import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.SuppressForbidden; @@ -37,7 +36,7 @@ /** * A helper to control a {@link Process} running the main Elasticsearch server. * - *

<p> The process can be started by calling {@link #start(Terminal, ProcessInfo, ServerArgs, KeyStoreWrapper)}. + * <p>
The process can be started by calling {@link #start(Terminal, ProcessInfo, ServerArgs)}. * The process is controlled by internally sending arguments and control signals on stdin, * and receiving control signals on stderr. The start method does not return until the * server is ready to process requests and has exited the bootstrap thread. @@ -67,8 +66,8 @@ public class ServerProcess { // this allows mocking the process building by tests interface OptionsBuilder { - List getJvmOptions(ServerArgs args, KeyStoreWrapper keyStoreWrapper, Path configDir, Path tmpDir, String envOptions) - throws InterruptedException, IOException, UserException; + List getJvmOptions(ServerArgs args, Path configDir, Path tmpDir, String envOptions) throws InterruptedException, + IOException, UserException; } // this allows mocking the process building by tests @@ -82,13 +81,11 @@ interface ProcessStarter { * @param terminal A terminal to connect the standard inputs and outputs to for the new process. * @param processInfo Info about the current process, for passing through to the subprocess. * @param args Arguments to the server process. - * @param keystore A keystore for accessing secrets. * @return A running server process that is ready for requests * @throws UserException If the process failed during bootstrap */ - public static ServerProcess start(Terminal terminal, ProcessInfo processInfo, ServerArgs args, KeyStoreWrapper keystore) - throws UserException { - return start(terminal, processInfo, args, keystore, JvmOptionsParser::determineJvmOptions, ProcessBuilder::start); + public static ServerProcess start(Terminal terminal, ProcessInfo processInfo, ServerArgs args) throws UserException { + return start(terminal, processInfo, args, JvmOptionsParser::determineJvmOptions, ProcessBuilder::start); } // package private so tests can mock options building and process starting @@ -96,7 +93,6 @@ static ServerProcess start( Terminal terminal, ProcessInfo processInfo, ServerArgs args, - KeyStoreWrapper keystore, OptionsBuilder optionsBuilder, ProcessStarter processStarter ) throws UserException { @@ -105,7 +101,7 @@ static ServerProcess start( boolean success = false; try { - jvmProcess = createProcess(args, keystore, processInfo, args.configDir(), optionsBuilder, processStarter); + jvmProcess = createProcess(args, processInfo, args.configDir(), optionsBuilder, processStarter); errorPump = new ErrorPumpThread(terminal.getErrorWriter(), jvmProcess.getErrorStream()); errorPump.start(); sendArgs(args, jvmProcess.getOutputStream()); @@ -184,7 +180,6 @@ private static void sendArgs(ServerArgs args, OutputStream processStdin) { // so the pump thread can complete, writing out the actual error. All we get here is the failure to write to // the process pipe, which isn't helpful to print. 
} - args.keystorePassword().close(); } private void sendShutdownMarker() { @@ -199,7 +194,6 @@ private void sendShutdownMarker() { private static Process createProcess( ServerArgs args, - KeyStoreWrapper keystore, ProcessInfo processInfo, Path configDir, OptionsBuilder optionsBuilder, @@ -211,7 +205,7 @@ private static Process createProcess( envVars.put("LIBFFI_TMPDIR", tempDir.toString()); } - List jvmOptions = optionsBuilder.getJvmOptions(args, keystore, configDir, tempDir, envVars.remove("ES_JAVA_OPTS")); + List jvmOptions = optionsBuilder.getJvmOptions(args, configDir, tempDir, envVars.remove("ES_JAVA_OPTS")); // also pass through distribution type jvmOptions.add("-Des.distribution.type=" + processInfo.sysprops().get("es.distribution.type")); diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmErgonomicsTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmErgonomicsTests.java index f68a51de85c2..0d4edfc384d4 100644 --- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmErgonomicsTests.java +++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmErgonomicsTests.java @@ -19,6 +19,7 @@ import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; @@ -179,4 +180,46 @@ public void testMaxDirectMemorySizeChoiceWhenSet() throws Exception { ); } + @SuppressWarnings("ConstantConditions") + public void testMissingOptionHandling() { + final Map g1GcOn = Map.of("UseG1GC", new JvmOption("true", "")); + final Map g1GcOff = Map.of("UseG1GC", new JvmOption("", "")); + + assertFalse(JvmErgonomics.tuneG1GCHeapRegion(Map.of(), false)); + assertThat( + expectThrows(IllegalStateException.class, () -> JvmErgonomics.tuneG1GCHeapRegion(Map.of(), true)).getMessage(), + allOf(containsString("[UseG1GC]"), containsString("unexpectedly missing")) + ); + assertThat( + expectThrows(IllegalStateException.class, () -> JvmErgonomics.tuneG1GCHeapRegion(g1GcOn, true)).getMessage(), + allOf(containsString("[G1HeapRegionSize]"), containsString("unexpectedly missing")) + ); + assertFalse(JvmErgonomics.tuneG1GCHeapRegion(g1GcOff, randomBoolean())); + + assertThat( + expectThrows(IllegalStateException.class, () -> JvmErgonomics.tuneG1GCReservePercent(Map.of(), randomBoolean())).getMessage(), + allOf(containsString("[UseG1GC]"), containsString("unexpectedly missing")) + ); + assertThat( + expectThrows(IllegalStateException.class, () -> JvmErgonomics.tuneG1GCReservePercent(g1GcOn, randomBoolean())).getMessage(), + allOf(containsString("[G1ReservePercent]"), containsString("unexpectedly missing")) + ); + assertEquals(0, JvmErgonomics.tuneG1GCReservePercent(g1GcOff, randomBoolean())); + + assertThat( + expectThrows(IllegalStateException.class, () -> JvmErgonomics.tuneG1GCInitiatingHeapOccupancyPercent(Map.of())).getMessage(), + allOf(containsString("[UseG1GC]"), containsString("unexpectedly missing")) + ); + assertThat( + expectThrows(IllegalStateException.class, () -> JvmErgonomics.tuneG1GCInitiatingHeapOccupancyPercent(g1GcOn)).getMessage(), + allOf(containsString("[InitiatingHeapOccupancyPercent]"), containsString("unexpectedly missing")) + ); + assertFalse(JvmErgonomics.tuneG1GCInitiatingHeapOccupancyPercent(g1GcOff)); + + assertThat( + expectThrows(IllegalStateException.class, () -> new JvmOption("OptionName", null)).getMessage(), + 
allOf(containsString("could not determine the origin of JVM option [OptionName]"), containsString("unsupported")) + ); + } + } diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java index 8acc94777bf0..5d63f29ac584 100644 --- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java +++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.server.cli; +import org.elasticsearch.core.Strings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase.WithoutSecurityManager; @@ -291,7 +292,7 @@ public void accept(final int lineNumber, final String line) { final int javaMajorVersion = randomIntBetween(8, Integer.MAX_VALUE); final int smallerJavaMajorVersion = randomIntBetween(7, javaMajorVersion - 1); - final String invalidRangeLine = String.format(Locale.ROOT, "%d:%d-XX:+UseG1GC", javaMajorVersion, smallerJavaMajorVersion); + final String invalidRangeLine = Strings.format("%d:%d-XX:+UseG1GC", javaMajorVersion, smallerJavaMajorVersion); try (StringReader sr = new StringReader(invalidRangeLine); BufferedReader br = new BufferedReader(sr)) { assertInvalidLines(br, Collections.singletonMap(1, invalidRangeLine)); } @@ -306,8 +307,8 @@ public void accept(final int lineNumber, final String line) { ); try (StringReader sr = new StringReader(numberFormatExceptionsLine); BufferedReader br = new BufferedReader(sr)) { final Map invalidLines = new HashMap<>(2); - invalidLines.put(1, formatted("%d:-XX:+UseG1GC", invalidLowerJavaMajorVersion)); - invalidLines.put(2, formatted("8-%d:-XX:+AggressiveOpts", invalidUpperJavaMajorVersion)); + invalidLines.put(1, Strings.format("%d:-XX:+UseG1GC", invalidLowerJavaMajorVersion)); + invalidLines.put(2, Strings.format("8-%d:-XX:+AggressiveOpts", invalidUpperJavaMajorVersion)); assertInvalidLines(br, invalidLines); } @@ -321,7 +322,7 @@ public void accept(final int lineNumber, final String line) { final int lowerBound = randomIntBetween(9, 16); final int upperBound = randomIntBetween(8, lowerBound - 1); - final String upperBoundGreaterThanLowerBound = String.format(Locale.ROOT, "%d-%d-XX:+UseG1GC", lowerBound, upperBound); + final String upperBoundGreaterThanLowerBound = Strings.format("%d-%d-XX:+UseG1GC", lowerBound, upperBound); try (StringReader sr = new StringReader(upperBoundGreaterThanLowerBound); BufferedReader br = new BufferedReader(sr)) { assertInvalidLines(br, Collections.singletonMap(1, upperBoundGreaterThanLowerBound)); } diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java index 7a189563801e..f420834f84f3 100644 --- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java +++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerCliTests.java @@ -22,6 +22,8 @@ import org.elasticsearch.cli.UserException; import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import 
org.elasticsearch.monitor.jvm.JvmInfo; @@ -32,6 +34,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.Locale; +import java.util.Optional; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; @@ -46,9 +49,12 @@ public class ServerCliTests extends CommandTestCase { + private SecureSettingsLoader mockSecureSettingsLoader; + @Before public void setupMockConfig() throws IOException { Files.createFile(configDir.resolve("log4j2.properties")); + mockSecureSettingsLoader = null; } @Override @@ -277,7 +283,7 @@ public void assertKeystorePassword(String password) throws Exception { } } String expectedPassword = password == null ? "" : password; - argsValidator = args -> assertThat(args.keystorePassword().toString(), equalTo(expectedPassword)); + argsValidator = args -> assertThat(((KeyStoreWrapper) args.secrets()).hasPassword(), equalTo(hasPassword)); autoConfigCallback = (t, options, env, processInfo) -> { char[] gotPassword = t.readSecret(""); assertThat(gotPassword, equalTo(expectedPassword.toCharArray())); @@ -314,6 +320,60 @@ public void testServerExitsNonZero() throws Exception { assertThat(exitCode, equalTo(140)); } + public void testSecureSettingsLoaderChoice() throws Exception { + var loader = loadWithMockSecureSettingsLoader(); + assertTrue(loader.loaded); + // the mock loader doesn't support autoconfigure, no need to bootstrap a keystore + assertFalse(loader.bootstrapped); + // assert that we ran the code to verify the environment + assertTrue(loader.verifiedEnv); + } + + public void testSecureSettingsLoaderWithPassword() throws Exception { + var loader = setupMockKeystoreLoader(); + assertKeystorePassword("aaaaaaaaaaaaaaaaaa"); + assertTrue(loader.loaded); + assertTrue(loader.bootstrapped); + // the password we read should match what we passed in + assertEquals("aaaaaaaaaaaaaaaaaa", loader.password); + // after the command the secrets password is closed + assertEquals( + "SecureString has already been closed", + expectThrows(IllegalStateException.class, () -> loader.secrets.password().get().getChars()).getMessage() + ); + } + + public void testSecureSettingsLoaderWithEmptyPassword() throws Exception { + var loader = setupMockKeystoreLoader(); + assertKeystorePassword(""); + assertTrue(loader.loaded); + assertTrue(loader.bootstrapped); + assertEquals("", loader.password); + } + + public void testSecureSettingsLoaderWithNullPassword() throws Exception { + var loader = setupMockKeystoreLoader(); + assertKeystorePassword(null); // no keystore exists + assertTrue(loader.loaded); + assertTrue(loader.bootstrapped); + assertEquals("", loader.password); + } + + private MockSecureSettingsLoader loadWithMockSecureSettingsLoader() throws Exception { + var loader = new MockSecureSettingsLoader(); + this.mockSecureSettingsLoader = loader; + Command command = newCommand(); + command.main(new String[0], terminal, new ProcessInfo(sysprops, envVars, esHomeDir)); + command.close(); + return loader; + } + + private KeystoreSecureSettingsLoader setupMockKeystoreLoader() { + var loader = new KeystoreSecureSettingsLoader(); + this.mockSecureSettingsLoader = loader; + return loader; + } + interface AutoConfigMethod { void autoconfig(Terminal terminal, OptionSet options, Environment env, ProcessInfo processInfo) throws UserException; } @@ -436,13 +496,120 @@ protected Command loadTool(String toolname, String libs) { } @Override - protected ServerProcess startServer(Terminal terminal, ProcessInfo 
processInfo, ServerArgs args, KeyStoreWrapper keystore) { + Environment autoConfigureSecurity( + Terminal terminal, + OptionSet options, + ProcessInfo processInfo, + Environment env, + SecureString keystorePassword + ) throws Exception { + if (mockSecureSettingsLoader != null && mockSecureSettingsLoader.supportsSecurityAutoConfiguration() == false) { + fail("We shouldn't be calling auto configure on loaders that don't support it"); + } + return super.autoConfigureSecurity(terminal, options, processInfo, env, keystorePassword); + } + + @Override + protected ServerProcess startServer(Terminal terminal, ProcessInfo processInfo, ServerArgs args) { if (argsValidator != null) { argsValidator.accept(args); } mockServer.reset(); return mockServer; } + + @Override + void syncPlugins(Terminal terminal, Environment env, ProcessInfo processInfo) throws Exception { + if (mockSecureSettingsLoader != null && mockSecureSettingsLoader instanceof MockSecureSettingsLoader mock) { + mock.verifiedEnv = true; + // equals as a pointer, environment shouldn't be changed if autoconfigure is not supported + assertFalse(mockSecureSettingsLoader.supportsSecurityAutoConfiguration()); + assertTrue(mock.environment == env); + } + + super.syncPlugins(terminal, env, processInfo); + } + + @Override + protected SecureSettingsLoader secureSettingsLoader(Environment env) { + if (mockSecureSettingsLoader != null) { + return mockSecureSettingsLoader; + } + + return new KeystoreSecureSettingsLoader(); + } }; } + + static class MockSecureSettingsLoader implements SecureSettingsLoader { + boolean loaded = false; + LoadedSecrets secrets = null; + String password = null; + boolean bootstrapped = false; + Environment environment = null; + boolean verifiedEnv = false; + + @Override + public SecureSettingsLoader.LoadedSecrets load(Environment environment, Terminal terminal) throws IOException { + loaded = true; + // Stash the environment pointer, so we can compare it. Environment shouldn't be changed for + // loaders that don't autoconfigure. + this.environment = environment; + + SecureString password = null; + + if (terminal.getReader().ready() == false) { + this.password = null; + } else { + password = new SecureString(terminal.readSecret("Enter a password")); + this.password = password.toString(); + } + + secrets = new SecureSettingsLoader.LoadedSecrets( + KeyStoreWrapper.create(), + password == null ? 
Optional.empty() : Optional.of(password) + ); + + return secrets; + } + + @Override + public SecureSettings bootstrap(Environment environment, SecureString password) throws Exception { + fail("Bootstrap shouldn't be called for loaders that cannot be auto-configured"); + bootstrapped = true; + return KeyStoreWrapper.create(); + } + + @Override + public boolean supportsSecurityAutoConfiguration() { + return false; + } + } + + static class KeystoreSecureSettingsLoader extends KeyStoreLoader { + boolean loaded = false; + LoadedSecrets secrets = null; + String password = null; + boolean bootstrapped = false; + + @Override + public LoadedSecrets load(Environment environment, Terminal terminal) throws Exception { + var result = super.load(environment, terminal); + loaded = true; + secrets = result; + password = result.password().get().toString(); + + return result; + } + + @Override + public SecureSettings bootstrap(Environment environment, SecureString password) throws Exception { + this.bootstrapped = true; + // make sure we don't fail in fips mode when we run with an empty password + if (inFipsJvm() && (password == null || password.isEmpty())) { + return KeyStoreWrapper.create(); + } + return super.bootstrap(environment, password); + } + } } diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerProcessTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerProcessTests.java index f0fa37227119..c8dfb84363d6 100644 --- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerProcessTests.java +++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/ServerProcessTests.java @@ -15,7 +15,8 @@ import org.elasticsearch.cli.ProcessInfo; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.io.stream.InputStreamStreamInput; -import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.IOUtils; import org.elasticsearch.test.ESTestCase; @@ -69,6 +70,7 @@ public class ServerProcessTests extends ESTestCase { ProcessValidator processValidator; MainMethod mainCallback; MockElasticsearchProcess process; + SecureSettings secrets; interface MainMethod { void main(ServerArgs args, InputStream stdin, PrintStream stderr, AtomicInteger exitCode) throws IOException; @@ -78,9 +80,9 @@ interface ProcessValidator { void validate(ProcessBuilder processBuilder) throws IOException; } - void runForeground() throws Exception { + int runForeground() throws Exception { var server = startProcess(false, false, ""); - server.waitFor(); + return server.waitFor(); } @Before @@ -92,9 +94,10 @@ public void resetEnv() { envVars.clear(); esHomeDir = createTempDir(); nodeSettings = Settings.builder(); - optionsBuilder = (args, keystore, configDir, tmpDir, envOptions) -> new ArrayList<>(); + optionsBuilder = (args, configDir, tmpDir, envOptions) -> new ArrayList<>(); processValidator = null; mainCallback = null; + secrets = KeyStoreWrapper.create(); } @AfterClass @@ -192,8 +195,7 @@ public Process destroyForcibly() { ServerProcess startProcess(boolean daemonize, boolean quiet, String keystorePassword) throws Exception { var pinfo = new ProcessInfo(Map.copyOf(sysprops), Map.copyOf(envVars), esHomeDir); - SecureString password = new SecureString(keystorePassword.toCharArray()); - var args = new 
ServerArgs(daemonize, quiet, null, password, nodeSettings.build(), esHomeDir.resolve("config")); + var args = new ServerArgs(daemonize, quiet, null, secrets, nodeSettings.build(), esHomeDir.resolve("config")); ServerProcess.ProcessStarter starter = pb -> { if (processValidator != null) { processValidator.validate(pb); @@ -201,7 +203,7 @@ ServerProcess startProcess(boolean daemonize, boolean quiet, String keystorePass process = new MockElasticsearchProcess(); return process; }; - return ServerProcess.start(terminal, pinfo, args, null, optionsBuilder, starter); + return ServerProcess.start(terminal, pinfo, args, optionsBuilder, starter); } public void testProcessBuilder() throws Exception { @@ -228,22 +230,12 @@ public void testPid() throws Exception { public void testBootstrapError() throws Exception { mainCallback = (args, stdin, stderr, exitCode) -> { - stderr.println(BootstrapInfo.USER_EXCEPTION_MARKER + "a bootstrap exception"); + stderr.println("a bootstrap exception"); exitCode.set(ExitCodes.CONFIG); }; - var e = expectThrows(UserException.class, () -> runForeground()); - assertThat(e.exitCode, equalTo(ExitCodes.CONFIG)); - assertThat(e.getMessage(), equalTo("a bootstrap exception")); - } - - public void testUserError() throws Exception { - mainCallback = (args, stdin, stderr, exitCode) -> { - stderr.println(BootstrapInfo.USER_EXCEPTION_MARKER + "a user exception"); - exitCode.set(ExitCodes.USAGE); - }; - var e = expectThrows(UserException.class, () -> runForeground()); - assertThat(e.exitCode, equalTo(ExitCodes.USAGE)); - assertThat(e.getMessage(), equalTo("a user exception")); + int exitCode = runForeground(); + assertThat(exitCode, equalTo(ExitCodes.CONFIG)); + assertThat(terminal.getErrorOutput(), containsString("a bootstrap exception")); } public void testStartError() throws Exception { @@ -253,9 +245,7 @@ public void testStartError() throws Exception { } public void testOptionsBuildingInterrupted() throws Exception { - optionsBuilder = (args, keystore, configDir, tmpDir, envOptions) -> { - throw new InterruptedException("interrupted while get jvm options"); - }; + optionsBuilder = (args, configDir, tmpDir, envOptions) -> { throw new InterruptedException("interrupted while get jvm options"); }; var e = expectThrows(RuntimeException.class, () -> runForeground()); assertThat(e.getCause().getMessage(), equalTo("interrupted while get jvm options")); } @@ -279,7 +269,7 @@ public void testLibffiEnv() throws Exception { } public void testTempDir() throws Exception { - optionsBuilder = (args, keystore, configDir, tmpDir, envOptions) -> { + optionsBuilder = (args, configDir, tmpDir, envOptions) -> { assertThat(tmpDir.toString(), Files.exists(tmpDir), is(true)); assertThat(tmpDir.getFileName().toString(), startsWith("elasticsearch-")); return new ArrayList<>(); @@ -291,7 +281,7 @@ public void testTempDirWindows() throws Exception { Path baseTmpDir = createTempDir(); sysprops.put("os.name", "Windows 10"); sysprops.put("java.io.tmpdir", baseTmpDir.toString()); - optionsBuilder = (args, keystore, configDir, tmpDir, envOptions) -> { + optionsBuilder = (args, configDir, tmpDir, envOptions) -> { assertThat(tmpDir.toString(), Files.exists(tmpDir), is(true)); assertThat(tmpDir.getFileName().toString(), equalTo("elasticsearch")); assertThat(tmpDir.getParent().toString(), equalTo(baseTmpDir.toString())); @@ -303,7 +293,7 @@ public void testTempDirWindows() throws Exception { public void testTempDirOverride() throws Exception { Path customTmpDir = createTempDir(); envVars.put("ES_TMPDIR", 
customTmpDir.toString()); - optionsBuilder = (args, keystore, configDir, tmpDir, envOptions) -> { + optionsBuilder = (args, configDir, tmpDir, envOptions) -> { assertThat(tmpDir.toString(), equalTo(customTmpDir.toString())); return new ArrayList<>(); }; @@ -329,7 +319,7 @@ public void testTempDirOverrideNotADirectory() throws Exception { public void testCustomJvmOptions() throws Exception { envVars.put("ES_JAVA_OPTS", "-Dmyoption=foo"); - optionsBuilder = (args, keystore, configDir, tmpDir, envOptions) -> { + optionsBuilder = (args, configDir, tmpDir, envOptions) -> { assertThat(envOptions, equalTo("-Dmyoption=foo")); return new ArrayList<>(); }; @@ -338,7 +328,7 @@ public void testCustomJvmOptions() throws Exception { } public void testCommandLineSysprops() throws Exception { - optionsBuilder = (args, keystore, configDir, tmpDir, envOptions) -> List.of("-Dfoo1=bar", "-Dfoo2=baz"); + optionsBuilder = (args, configDir, tmpDir, envOptions) -> List.of("-Dfoo1=bar", "-Dfoo2=baz"); processValidator = pb -> { assertThat(pb.command(), contains("-Dfoo1=bar")); assertThat(pb.command(), contains("-Dfoo2=bar")); diff --git a/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceDaemon.java b/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceDaemon.java index 637bd09eb2ce..5d1e67bc92fb 100644 --- a/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceDaemon.java +++ b/distribution/tools/windows-service-cli/src/main/java/org/elasticsearch/windows/service/WindowsServiceDaemon.java @@ -14,6 +14,7 @@ import org.elasticsearch.cli.ProcessInfo; import org.elasticsearch.cli.Terminal; import org.elasticsearch.common.cli.EnvironmentAwareCommand; +import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.env.Environment; import org.elasticsearch.server.cli.ServerProcess; @@ -34,9 +35,12 @@ class WindowsServiceDaemon extends EnvironmentAwareCommand { @Override public void execute(Terminal terminal, OptionSet options, Environment env, ProcessInfo processInfo) throws Exception { - var args = new ServerArgs(false, true, null, new SecureString(""), env.settings(), env.configFile()); - this.server = ServerProcess.start(terminal, processInfo, args, null); - // start does not return until the server is ready, and we do not wait for the process + // the Windows service daemon doesn't support secure settings implementations other than the keystore + try (var loadedSecrets = KeyStoreWrapper.bootstrap(env.configFile(), () -> new SecureString(new char[0]))) { + var args = new ServerArgs(false, true, null, loadedSecrets, env.settings(), env.configFile()); + this.server = ServerProcess.start(terminal, processInfo, args); + // start does not return until the server is ready, and we do not wait for the process + } } @Override diff --git a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceInstallCommandTests.java b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceInstallCommandTests.java index ec4b27c779d6..7753e09bd9f4 100644 --- a/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceInstallCommandTests.java +++ b/distribution/tools/windows-service-cli/src/test/java/org/elasticsearch/windows/service/WindowsServiceInstallCommandTests.java @@ -11,6 +11,7 @@ 
import org.elasticsearch.Version; import org.elasticsearch.cli.Command; import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.core.Strings; import org.junit.Before; import java.io.IOException; @@ -153,13 +154,13 @@ public void testPidFile() throws Exception { } public void testDisplayName() throws Exception { - assertServiceArgs(Map.of("DisplayName", formatted("\"Elasticsearch %s (elasticsearch-service-x64)\"", Version.CURRENT))); + assertServiceArgs(Map.of("DisplayName", Strings.format("\"Elasticsearch %s (elasticsearch-service-x64)\"", Version.CURRENT))); envVars.put("SERVICE_DISPLAY_NAME", "my service name"); assertServiceArgs(Map.of("DisplayName", "\"my service name\"")); } public void testDescription() throws Exception { - String defaultDescription = formatted("\"Elasticsearch %s Windows Service - https://elastic.co\"", Version.CURRENT); + String defaultDescription = Strings.format("\"Elasticsearch %s Windows Service - https://elastic.co\"", Version.CURRENT); assertServiceArgs(Map.of("Description", defaultDescription)); envVars.put("SERVICE_DESCRIPTION", "my description"); assertServiceArgs(Map.of("Description", "\"my description\"")); diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index ce5a7dce1531..8e5464f0b388 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,8 +1,8 @@ include::{docs-root}/shared/versions/stack/{source_branch}.asciidoc[] -:lucene_version: 9.4.1 -:lucene_version_path: 9_4_1 +:lucene_version: 9.5.0 +:lucene_version_path: 9_5_0 :jdk: 11.0.2 :jdk_major: 11 :build_type: tar diff --git a/docs/changelog/85656.yaml b/docs/changelog/85656.yaml new file mode 100644 index 000000000000..3ca31b32c6f6 --- /dev/null +++ b/docs/changelog/85656.yaml @@ -0,0 +1,6 @@ +pr: 85656 +summary: Instrument Weight#count in ProfileWeight +area: Search +type: enhancement +issues: + - 85203 diff --git a/docs/changelog/86110.yaml b/docs/changelog/86110.yaml deleted file mode 100644 index 376cadaa5660..000000000000 --- a/docs/changelog/86110.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 86110 -summary: Add LimitedOffsetsEnum to Limited offset token -area: Search -type: enhancement -issues: - - 86109 diff --git a/docs/changelog/86323.yaml b/docs/changelog/86323.yaml deleted file mode 100644 index 4abf25204c37..000000000000 --- a/docs/changelog/86323.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 86323 -summary: Bulk merge field-caps responses using mapping hash -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/88088.yaml b/docs/changelog/88088.yaml deleted file mode 100644 index fedfd4942ae3..000000000000 --- a/docs/changelog/88088.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 88088 -summary: Move SpatialUtils to geo library -area: Geo -type: enhancement -issues: - - 86607 diff --git a/docs/changelog/88600.yaml b/docs/changelog/88600.yaml deleted file mode 100644 index f1d9d8a8730a..000000000000 --- a/docs/changelog/88600.yaml +++ /dev/null @@ -1,10 +0,0 @@ -pr: 88600 -summary: Make `categorize_text` aggregation GA -area: Machine Learning -type: feature -issues: [] -highlight: - title: Make `categorize_text` aggregation GA - body: The `categorize_text` aggregation has been moved from - technical preview to general availability. 
- notable: true diff --git a/docs/changelog/88686.yaml b/docs/changelog/88686.yaml new file mode 100644 index 000000000000..1346e725df1d --- /dev/null +++ b/docs/changelog/88686.yaml @@ -0,0 +1,6 @@ +pr: 88686 +summary: "Fix: do not allow map key types other than String" +area: Aggregations +type: bug +issues: + - 66057 diff --git a/docs/changelog/88952.yaml b/docs/changelog/88952.yaml deleted file mode 100644 index 2d16663cbcf1..000000000000 --- a/docs/changelog/88952.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 88952 -summary: Prevalidate node removal API -area: Allocation -type: enhancement -issues: [] diff --git a/docs/changelog/89216.yaml b/docs/changelog/89216.yaml deleted file mode 100644 index ee7dc37dbdf3..000000000000 --- a/docs/changelog/89216.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 89216 -summary: Centroid aggregation for cartesian points and shapes -area: Geo -type: enhancement -issues: [] diff --git a/docs/changelog/89238.yaml b/docs/changelog/89238.yaml deleted file mode 100644 index 7f434db2aefe..000000000000 --- a/docs/changelog/89238.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 89238 -summary: "ILM: Get policy support wildcard name" -area: ILM+SLM -type: enhancement -issues: [] diff --git a/docs/changelog/89461.yaml b/docs/changelog/89461.yaml deleted file mode 100644 index f99afbabdbd4..000000000000 --- a/docs/changelog/89461.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 89461 -summary: Add certificate start/expiry dates to SSL Diagnostic message -area: TLS -type: enhancement -issues: [] diff --git a/docs/changelog/89735.yaml b/docs/changelog/89735.yaml deleted file mode 100644 index 8fd69d53b379..000000000000 --- a/docs/changelog/89735.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 89735 -summary: Operator/ingest -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/89950.yaml b/docs/changelog/89950.yaml deleted file mode 100644 index 2027209fcb2a..000000000000 --- a/docs/changelog/89950.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 89950 -summary: "Synthetic _source: support `field` in many cases" -area: TSDB -type: enhancement -issues: [] diff --git a/docs/changelog/89965.yaml b/docs/changelog/89965.yaml deleted file mode 100644 index 4616f2806ba0..000000000000 --- a/docs/changelog/89965.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 89965 -summary: "EQL sequences: support join on multi-values" -area: EQL -type: bug -issues: [] diff --git a/docs/changelog/90038.yaml b/docs/changelog/90038.yaml deleted file mode 100644 index 826b6f9284fe..000000000000 --- a/docs/changelog/90038.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90038 -summary: "Synthetic `_source`: `ignore_malformed` for `ip`" -area: TSDB -type: enhancement -issues: [] diff --git a/docs/changelog/90143.yaml b/docs/changelog/90143.yaml deleted file mode 100644 index 94124cba8fb5..000000000000 --- a/docs/changelog/90143.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90143 -summary: Operator/index templates -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/90196.yaml b/docs/changelog/90196.yaml deleted file mode 100644 index 8a8115ba6092..000000000000 --- a/docs/changelog/90196.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90196 -summary: "Synthetic `_source`: support `wildcard` field" -area: TSDB -type: enhancement -issues: [] diff --git a/docs/changelog/90200.yaml b/docs/changelog/90200.yaml deleted file mode 100644 index 36b2017ac712..000000000000 --- a/docs/changelog/90200.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90200 -summary: Add profiling information for knn vector queries -area: Vector Search -type: 
enhancement -issues: [] diff --git a/docs/changelog/90282.yaml b/docs/changelog/90282.yaml deleted file mode 100644 index e08ace64533a..000000000000 --- a/docs/changelog/90282.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 90282 -summary: Increase snaphot pool max size to 10 -area: Snapshot/Restore -type: enhancement -issues: - - 89608 diff --git a/docs/changelog/90287.yaml b/docs/changelog/90287.yaml deleted file mode 100644 index 954b90e61ece..000000000000 --- a/docs/changelog/90287.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90287 -summary: 'Revert "Remove `ImmutableOpenMap` from snapshot services"' -area: "Infra/Core" -type: regression -issues: [] diff --git a/docs/changelog/90296.yaml b/docs/changelog/90296.yaml deleted file mode 100644 index dd021ec08bf4..000000000000 --- a/docs/changelog/90296.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90296 -summary: Limit shard realocation retries -area: Allocation -type: enhancement -issues: [] diff --git a/docs/changelog/90298.yaml b/docs/changelog/90298.yaml deleted file mode 100644 index 549f690ed891..000000000000 --- a/docs/changelog/90298.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90298 -summary: Improve date math exclusions in expressions -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/90303.yaml b/docs/changelog/90303.yaml deleted file mode 100644 index 21292e424ad3..000000000000 --- a/docs/changelog/90303.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 90303 -summary: Use `IndexOrDocValues` query for IP range queries -area: Search -type: enhancement -issues: - - 83658 diff --git a/docs/changelog/90346.yaml b/docs/changelog/90346.yaml deleted file mode 100644 index bf9388a49175..000000000000 --- a/docs/changelog/90346.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 90346 -summary: Update error states from inside the main state executor -area: Infra/Core -type: bug -issues: - - 90337 diff --git a/docs/changelog/90381.yaml b/docs/changelog/90381.yaml deleted file mode 100644 index a7f467510be4..000000000000 --- a/docs/changelog/90381.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90381 -summary: aggregate metric double add a max min validation -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/90391.yaml b/docs/changelog/90391.yaml deleted file mode 100644 index 4565a9503439..000000000000 --- a/docs/changelog/90391.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 90391 -summary: Remove resize index settings once shards are started -area: Recovery -type: enhancement -issues: - - 90127 diff --git a/docs/changelog/90399.yaml b/docs/changelog/90399.yaml deleted file mode 100644 index 485f9eeeedd9..000000000000 --- a/docs/changelog/90399.yaml +++ /dev/null @@ -1,13 +0,0 @@ -pr: 90399 -summary: Deprecate state field in /_cluster/reroute response -area: Allocation -type: deprecation -issues: [] -deprecation: - title: state field is deprecated in /_cluster/reroute response - area: REST API - details: |- - `state` field is deprecated in `/_cluster/reroute` response. Cluster state does not provide meaningful information - about the result of reroute/commands execution. There are no guarantees that this exact state would be applied. - impact: |- - Reroute API users should not rely on `state` field and instead use `explain` to request result of commands execution. 
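The deprecation note above steers reroute callers toward the `explain` output rather than the returned cluster state. As a minimal sketch only (the index and node names below are hypothetical and not taken from this changeset), a dry-run reroute that asks for per-command explanations could look like:

[source,console]
----
POST /_cluster/reroute?dry_run=true&explain=true
{
  "commands": [
    {
      "move": {
        "index": "my-index",
        "shard": 0,
        "from_node": "node-1",
        "to_node": "node-2"
      }
    }
  ]
}
----

With `explain=true` the response carries an `explanations` array describing why each command was or was not applied, which is the information the deprecated `state` field never reliably provided.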
diff --git a/docs/changelog/90425.yaml b/docs/changelog/90425.yaml deleted file mode 100644 index 6205900f8839..000000000000 --- a/docs/changelog/90425.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90425 -summary: Enhance nested depth tracking when parsing queries -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/90428.yaml b/docs/changelog/90428.yaml deleted file mode 100644 index cb65566f7576..000000000000 --- a/docs/changelog/90428.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90428 -summary: Support malformed numbers in synthetic `_source` -area: TSDB -type: enhancement -issues: [] diff --git a/docs/changelog/90447.yaml b/docs/changelog/90447.yaml new file mode 100644 index 000000000000..198bbb781565 --- /dev/null +++ b/docs/changelog/90447.yaml @@ -0,0 +1,5 @@ +pr: 90447 +summary: Add a TSDB rate aggregation +area: "TSDB" +type: feature +issues: [] diff --git a/docs/changelog/90460.yaml b/docs/changelog/90460.yaml deleted file mode 100644 index 57e70ac3aebe..000000000000 --- a/docs/changelog/90460.yaml +++ /dev/null @@ -1,10 +0,0 @@ -pr: 90460 -summary: Deprecate 'remove_binary' default of false for ingest attachment processor -area: Ingest Node -type: deprecation -issues: [] -deprecation: - title: Deprecate 'remove_binary' default of false for ingest attachment processor - area: CRUD - details: The default "remove_binary" option for the attachment processor will be changed from false to true in a later Elasticsearch release. This means that the binary file sent to Elasticsearch will not be retained. - impact: Users should update the "remove_binary" option to be explicitly true or false, instead of relying on the default value, so that no default value changes will affect Elasticsearch. diff --git a/docs/changelog/90482.yaml b/docs/changelog/90482.yaml deleted file mode 100644 index 61f3e42609ea..000000000000 --- a/docs/changelog/90482.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 90482 -summary: Transport threads and `_hot_threads` -area: Infra/Core -type: enhancement -issues: - - 90334 diff --git a/docs/changelog/90494.yaml b/docs/changelog/90494.yaml deleted file mode 100644 index 9c73655f0f0c..000000000000 --- a/docs/changelog/90494.yaml +++ /dev/null @@ -1,16 +0,0 @@ -pr: 90494 -summary: Fix logstash loadavg (xpack cases) -area: Monitoring -type: bug -issues: [] -highlight: - title: "Stack Monitoring: Logstash load average type fixed for metricbeat collection" - body: |- - Previously, the templates for ingesting logstash load average using metricbeat were set to `long`. This provides only an integer graph. - - The type has been corrected to `half_float`. You can force a rollover to see the change immediately or wait for the next ILM rollover. 
- - [source,console] - ---- - POST .monitoring-logstash-8-mb/_rollover - ---- diff --git a/docs/changelog/90534.yaml b/docs/changelog/90534.yaml new file mode 100644 index 000000000000..c24735d94879 --- /dev/null +++ b/docs/changelog/90534.yaml @@ -0,0 +1,6 @@ +pr: 90534 +summary: Fix transport handshake starting before TLS handshake completes +area: Network +type: bug +issues: + - 77999 diff --git a/docs/changelog/90536.yaml b/docs/changelog/90536.yaml deleted file mode 100644 index b9ec1e33e96c..000000000000 --- a/docs/changelog/90536.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 90536 -summary: Add profiling and documentation for dfs phase -area: Search -type: enhancement -issues: - - 89713 diff --git a/docs/changelog/90552.yaml b/docs/changelog/90552.yaml deleted file mode 100644 index ae294d2edd2d..000000000000 --- a/docs/changelog/90552.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 90552 -summary: "Generate 'index.routing_path' from dynamic mapping templates" -area: TSDB -type: enhancement -issues: - - 90528 diff --git a/docs/changelog/90553.yaml b/docs/changelog/90553.yaml deleted file mode 100644 index d6cfdbc48e4f..000000000000 --- a/docs/changelog/90553.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90553 -summary: Upgrade XContent to Jackson 2.14.0 and enable Fast Double Parser -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/90585.yaml b/docs/changelog/90585.yaml deleted file mode 100644 index 77adbb6d274a..000000000000 --- a/docs/changelog/90585.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 90585 -summary: Return docs when using nested mappings in archive indices -area: Search -type: enhancement -issues: - - 90523 diff --git a/docs/changelog/90589.yaml b/docs/changelog/90589.yaml deleted file mode 100644 index c0e0f8298d98..000000000000 --- a/docs/changelog/90589.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 90589 -summary: Upgrade to log4j 2.19.0 -area: Infra/Logging -type: upgrade -issues: - - 90584 diff --git a/docs/changelog/90593.yaml b/docs/changelog/90593.yaml deleted file mode 100644 index 23f8acc7fa38..000000000000 --- a/docs/changelog/90593.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90593 -summary: Test downsample runtime fields and security -area: Rollup -type: enhancement -issues: [] diff --git a/docs/changelog/90604.yaml b/docs/changelog/90604.yaml deleted file mode 100644 index 94c913cc1c12..000000000000 --- a/docs/changelog/90604.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90604 -summary: Upgrade to Netty 4.1.82.Final -area: Network -type: upgrade -issues: [] diff --git a/docs/changelog/90612.yaml b/docs/changelog/90612.yaml deleted file mode 100644 index c69cc1fa3d8e..000000000000 --- a/docs/changelog/90612.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90612 -summary: Make `knn` search requests fully cancellable -area: Vector Search -type: bug -issues: [] diff --git a/docs/changelog/90649.yaml b/docs/changelog/90649.yaml deleted file mode 100644 index 9a377456a088..000000000000 --- a/docs/changelog/90649.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90649 -summary: "[Stack Monitoring] Update ES module mappings" -area: Monitoring -type: bug -issues: [] diff --git a/docs/changelog/90675.yaml b/docs/changelog/90675.yaml deleted file mode 100644 index 6fda273b6b08..000000000000 --- a/docs/changelog/90675.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90675 -summary: Provide additional information about anomaly score factors -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/90688.yaml b/docs/changelog/90688.yaml deleted file mode 100644 index 
f848ca01a7be..000000000000 --- a/docs/changelog/90688.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 90688 -summary: Fixing a race condition in `EnrichCoordinatorProxyAction` that can leave - an item stuck in its queue -area: Ingest Node -type: bug -issues: - - 90598 diff --git a/docs/changelog/90723.yaml b/docs/changelog/90723.yaml deleted file mode 100644 index 7ebf4c21f7ee..000000000000 --- a/docs/changelog/90723.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90723 -summary: Add a regex to the output of the `categorize_text` aggregation -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/90724.yaml b/docs/changelog/90724.yaml deleted file mode 100644 index bafe4c28f264..000000000000 --- a/docs/changelog/90724.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 90724 -summary: Fix `TransportMasterNodeAction` holding a CS reference needlessly -area: Cluster Coordination -type: bug -issues: - - 89220 diff --git a/docs/changelog/90728.yaml b/docs/changelog/90728.yaml deleted file mode 100644 index 79efcd0050ef..000000000000 --- a/docs/changelog/90728.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90728 -summary: Add api to update trained model deployment `number_of_allocations` -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/90760.yaml b/docs/changelog/90760.yaml deleted file mode 100644 index 6656ed2ce041..000000000000 --- a/docs/changelog/90760.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90760 -summary: Add a health section to transform stats -area: Transform -type: enhancement -issues: [] diff --git a/docs/changelog/90764.yaml b/docs/changelog/90764.yaml deleted file mode 100644 index c8f759ef8200..000000000000 --- a/docs/changelog/90764.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90764 -summary: Allow overriding timestamp field to null in file structure finder -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/90768.yaml b/docs/changelog/90768.yaml deleted file mode 100644 index f011973a0d99..000000000000 --- a/docs/changelog/90768.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 90768 -summary: Keep track of average shard write load -area: CRUD -type: enhancement -issues: - - 90102 diff --git a/docs/changelog/90774.yaml b/docs/changelog/90774.yaml deleted file mode 100644 index 9fb5d11ea15f..000000000000 --- a/docs/changelog/90774.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90774 -summary: Add support for indexing byte-sized knn vectors -area: Vector Search -type: feature -issues: [] diff --git a/docs/changelog/90805.yaml b/docs/changelog/90805.yaml deleted file mode 100644 index b4147ad94004..000000000000 --- a/docs/changelog/90805.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90805 -summary: Example stable plugin -area: Infra/Plugins -type: enhancement -issues: [] diff --git a/docs/changelog/90806.yaml b/docs/changelog/90806.yaml deleted file mode 100644 index e97d8ae86476..000000000000 --- a/docs/changelog/90806.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90806 -summary: Don't create IndexCaps objects when recording unmapped fields -area: Mapping -type: enhancement -issues: [90796] diff --git a/docs/changelog/90807.yaml b/docs/changelog/90807.yaml deleted file mode 100644 index 0fef9f51dbc6..000000000000 --- a/docs/changelog/90807.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90807 -summary: Check `NodesShutdownMetadata` type before assuming restart -area: Allocation -type: bug -issues: [] diff --git a/docs/changelog/90812.yaml b/docs/changelog/90812.yaml deleted file mode 100644 index d440c9685317..000000000000 --- a/docs/changelog/90812.yaml +++ 
/dev/null @@ -1,5 +0,0 @@ -pr: 90812 -summary: Alias timestamp to @timestamp in anomaly detection results index -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/90822.yaml b/docs/changelog/90822.yaml deleted file mode 100644 index 124d053712ab..000000000000 --- a/docs/changelog/90822.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90822 -summary: Fix handling empty key case in the terms aggregation -area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/90835.yaml b/docs/changelog/90835.yaml deleted file mode 100644 index 8eac3367847b..000000000000 --- a/docs/changelog/90835.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 90835 -summary: "Make `extendedPlugins,` `HasNativeController` and `moduleName` optional\ - \ in plugin descriptor" -area: Infra/Plugins -type: enhancement -issues: [] diff --git a/docs/changelog/90870.yaml b/docs/changelog/90870.yaml deleted file mode 100644 index ce68c4cdc6e2..000000000000 --- a/docs/changelog/90870.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90870 -summary: Create placeholder plugin when loading stable plugins -area: Infra/Plugins -type: enhancement -issues: [] diff --git a/docs/changelog/90902.yaml b/docs/changelog/90902.yaml deleted file mode 100644 index ecebdd1feca9..000000000000 --- a/docs/changelog/90902.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90902 -summary: Index expression exclusions never trigger "not found" -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/90931.yaml b/docs/changelog/90931.yaml deleted file mode 100644 index 045ca8a1659b..000000000000 --- a/docs/changelog/90931.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90931 -summary: Refactor enrich maintenance coordination logic -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/90977.yaml b/docs/changelog/90977.yaml new file mode 100644 index 000000000000..995a84299920 --- /dev/null +++ b/docs/changelog/90977.yaml @@ -0,0 +1,6 @@ +pr: 90977 +summary: '`TransportListTaskAction:` wait for tasks to finish asynchronously' +area: Task Management +type: enhancement +issues: + - 89564 diff --git a/docs/changelog/90989.yaml b/docs/changelog/90989.yaml deleted file mode 100644 index 668c075d3148..000000000000 --- a/docs/changelog/90989.yaml +++ /dev/null @@ -1,16 +0,0 @@ -pr: 90989 -summary: Deprecate silently ignoring type, fields, copy_to and boost in metadata field definition -area: Mapping -type: deprecation -issues: - - 35389 -deprecation: - title: Deprecate silently ignoring type, fields, copy_to and boost in metadata field definition - area: Mapping - details: Unsupported parameters like type, fields, copy_to and boost are silently ignored when - provided as part of the configuration of a metadata field in the index mappings. They will - cause a deprecation warning when used in the mappings for indices that are created from 8.6 - onwards. - impact: To resolve the deprecation warning, remove the mention of type, fields, copy_to or boost - from any metadata field definition as part of index mappings. They take no effect so removing - them won't have any impact besides resolving the deprecation warning. 
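To make the impact note above concrete, here is a minimal sketch, under the assumption of a hypothetical index name that is not part of this changeset, of a mapping that would draw the warning from 8.6 onwards because the `_routing` metadata field is given an unsupported `type` parameter:

[source,console]
----
PUT /my-index
{
  "mappings": {
    "_routing": {
      "required": true,
      "type": "keyword"
    }
  }
}
----

Removing the `"type": "keyword"` entry is all the resolution asks for; since the parameter was ignored anyway, dropping it changes no behaviour and clears the deprecation warning.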
diff --git a/docs/changelog/90993.yaml b/docs/changelog/90993.yaml deleted file mode 100644 index 209522227eeb..000000000000 --- a/docs/changelog/90993.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 90993 -summary: Add "index" and "search" node roles with feature flag and setting -area: Distributed -type: feature -issues: [] diff --git a/docs/changelog/91006.yaml b/docs/changelog/91006.yaml deleted file mode 100644 index 56ef752f1895..000000000000 --- a/docs/changelog/91006.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91006 -summary: Fix potential issue with graph api's timed out field in response -area: Graph -type: bug -issues: [] diff --git a/docs/changelog/91019.yaml b/docs/changelog/91019.yaml deleted file mode 100644 index 5753ab4d7fba..000000000000 --- a/docs/changelog/91019.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91019 -summary: Store write load in the `IndexMetadata` during data streams rollovers -area: Allocation -type: enhancement -issues: [] diff --git a/docs/changelog/91021.yaml b/docs/changelog/91021.yaml deleted file mode 100644 index 5e22c1c76fa4..000000000000 --- a/docs/changelog/91021.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 91021 -summary: Tie snapshot speed to node bandwidth settings -area: Snapshot/Restore -type: enhancement -issues: - - 57023 diff --git a/docs/changelog/91038.yaml b/docs/changelog/91038.yaml deleted file mode 100644 index ce0a8d00abb4..000000000000 --- a/docs/changelog/91038.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 91038 -summary: '`DesiredBalance:` expose it via _internal/desired_balance' -area: Allocation -type: enhancement -issues: - - 90583 diff --git a/docs/changelog/91045.yaml b/docs/changelog/91045.yaml deleted file mode 100644 index ca5bc4240d9f..000000000000 --- a/docs/changelog/91045.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91045 -summary: Support `aggregate_metric_double` field type in transform aggregations -area: Transform -type: enhancement -issues: [] diff --git a/docs/changelog/91047.yaml b/docs/changelog/91047.yaml deleted file mode 100644 index 73cbf0c402d8..000000000000 --- a/docs/changelog/91047.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91047 -summary: Security remove datemath special handling -area: Authorization -type: bug -issues: [] diff --git a/docs/changelog/91073.yaml b/docs/changelog/91073.yaml deleted file mode 100644 index 30d41b6dc0b7..000000000000 --- a/docs/changelog/91073.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 91073 -summary: GeoBoundsAggregations reject sub aggregations -area: Aggregations -type: bug -issues: - - 91072 diff --git a/docs/changelog/91116.yaml b/docs/changelog/91116.yaml new file mode 100644 index 000000000000..ed35fa364e47 --- /dev/null +++ b/docs/changelog/91116.yaml @@ -0,0 +1,6 @@ +pr: 91116 +summary: Add from parameter to Transform Start API +area: Transform +type: enhancement +issues: + - 88646 diff --git a/docs/changelog/91137.yaml b/docs/changelog/91137.yaml deleted file mode 100644 index 3f54a1599a85..000000000000 --- a/docs/changelog/91137.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91137 -summary: Add a filter parameter to frequent items -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/91140.yaml b/docs/changelog/91140.yaml deleted file mode 100644 index 733e7bfdda46..000000000000 --- a/docs/changelog/91140.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91140 -summary: Improve H3#hexRing logic and add H3#areNeighborCells method -area: Geo -type: enhancement -issues: [] diff --git a/docs/changelog/91184.yaml b/docs/changelog/91184.yaml deleted file mode 100644 index 
6bacb199d8c0..000000000000 --- a/docs/changelog/91184.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91184 -summary: Add fielddata and scripting support for byte-sized vectors -area: Vector Search -type: feature -issues: [] diff --git a/docs/changelog/91195.yaml b/docs/changelog/91195.yaml deleted file mode 100644 index f42ed122a3f0..000000000000 --- a/docs/changelog/91195.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 91195 -summary: Allow different decay values depending on the score function -area: Search -type: bug -issues: - - 78887 diff --git a/docs/changelog/91231.yaml b/docs/changelog/91231.yaml deleted file mode 100644 index 1ca88ff84d8d..000000000000 --- a/docs/changelog/91231.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91231 -summary: Fix index expression options for requests with a single name or pattern -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/91234.yaml b/docs/changelog/91234.yaml deleted file mode 100644 index 1e25efe5aa30..000000000000 --- a/docs/changelog/91234.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 91234 -summary: Low priority trained model deployments -area: Machine Learning -type: enhancement -issues: - - 91024 diff --git a/docs/changelog/91238.yaml b/docs/changelog/91238.yaml new file mode 100644 index 000000000000..cfac17b27e60 --- /dev/null +++ b/docs/changelog/91238.yaml @@ -0,0 +1,7 @@ +pr: 91238 +summary: Avoiding `BulkProcessor` deadlock in ILMHistoryStore +area: ILM+SLM +type: bug +issues: + - 68468 + - 50440 diff --git a/docs/changelog/91243.yaml b/docs/changelog/91243.yaml deleted file mode 100644 index ac0433b51769..000000000000 --- a/docs/changelog/91243.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91243 -summary: More actionable error for ancient indices -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/91256.yaml b/docs/changelog/91256.yaml new file mode 100644 index 000000000000..3c829465073b --- /dev/null +++ b/docs/changelog/91256.yaml @@ -0,0 +1,6 @@ +pr: 91256 +summary: Prevalidate node removal API (pt. 
2) +area: Allocation +type: enhancement +issues: + - 87776 diff --git a/docs/changelog/91271.yaml b/docs/changelog/91271.yaml deleted file mode 100644 index 43ad13596bc1..000000000000 --- a/docs/changelog/91271.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91271 -summary: Upgrade to Netty 4.1.84 -area: Network -type: upgrade -issues: [] diff --git a/docs/changelog/91296.yaml b/docs/changelog/91296.yaml deleted file mode 100644 index 83889a6929ae..000000000000 --- a/docs/changelog/91296.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91296 -summary: Allow `model_aliases` to be used with Pytorch trained models -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/91298.yaml b/docs/changelog/91298.yaml deleted file mode 100644 index 77077b3b0b29..000000000000 --- a/docs/changelog/91298.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 91298 -summary: Support `cartesian_bounds` aggregation on point and shape -area: Geo -type: enhancement -issues: - - 90157 diff --git a/docs/changelog/91299.yaml b/docs/changelog/91299.yaml deleted file mode 100644 index 9d50b2244b4a..000000000000 --- a/docs/changelog/91299.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91299 -summary: Add ability to filter and sort buckets by `change_point` numeric values -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/91306.yaml b/docs/changelog/91306.yaml deleted file mode 100644 index 4170ecd1973a..000000000000 --- a/docs/changelog/91306.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91306 -summary: Rename `NamedComponent` name parameter to value -area: Infra/Plugins -type: enhancement -issues: [] diff --git a/docs/changelog/91312.yaml b/docs/changelog/91312.yaml deleted file mode 100644 index e5628ad74190..000000000000 --- a/docs/changelog/91312.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91312 -summary: EQL samples -area: EQL -type: feature -issues: [] diff --git a/docs/changelog/91328.yaml b/docs/changelog/91328.yaml deleted file mode 100644 index b225d9619ac8..000000000000 --- a/docs/changelog/91328.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91328 -summary: Consolidate field name validation when parsing mappings and documents -area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/91338.yaml b/docs/changelog/91338.yaml deleted file mode 100644 index 50c08b3ce5f8..000000000000 --- a/docs/changelog/91338.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91338 -summary: Extend systemd startup timeout to 900s -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/91343.yaml b/docs/changelog/91343.yaml deleted file mode 100644 index 27a59ec487d0..000000000000 --- a/docs/changelog/91343.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91343 -summary: Introduce desired-balance allocator -area: Allocation -type: enhancement -issues: [] diff --git a/docs/changelog/91347.yaml b/docs/changelog/91347.yaml deleted file mode 100644 index 254f5474f1e7..000000000000 --- a/docs/changelog/91347.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91347 -summary: "Use an explicit null check for null receivers in painless, rather than an NPE" -area: Infra/Scripting -type: enhancement -issues: [91236] diff --git a/docs/changelog/91362.yaml b/docs/changelog/91362.yaml deleted file mode 100644 index a34bec585229..000000000000 --- a/docs/changelog/91362.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 91362 -summary: "Introduce a phase to use String.equals on constant strings, rather than\ - \ def equality" -area: Infra/Core -type: enhancement -issues: [91235] diff --git a/docs/changelog/91364.yaml b/docs/changelog/91364.yaml deleted 
file mode 100644 index a2da37f99a37..000000000000 --- a/docs/changelog/91364.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91364 -summary: Fix `BytesRefArray` on append empty `BytesRef` -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/91367.yaml b/docs/changelog/91367.yaml new file mode 100644 index 000000000000..2ec5842ee132 --- /dev/null +++ b/docs/changelog/91367.yaml @@ -0,0 +1,5 @@ +pr: 91367 +summary: Deserialize responses on the handling thread-pool +area: Network +type: enhancement +issues: [] diff --git a/docs/changelog/91398.yaml b/docs/changelog/91398.yaml new file mode 100644 index 000000000000..2127e983cbef --- /dev/null +++ b/docs/changelog/91398.yaml @@ -0,0 +1,5 @@ +pr: 91398 +summary: Deduplicate Heavy CCR Repository CS Requests +area: CCR +type: bug +issues: [] diff --git a/docs/changelog/91409.yaml b/docs/changelog/91409.yaml deleted file mode 100644 index 0fc8febae6d7..000000000000 --- a/docs/changelog/91409.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91409 -summary: Remove version limitations for CCS -area: EQL -type: enhancement -issues: [] diff --git a/docs/changelog/91413.yaml b/docs/changelog/91413.yaml new file mode 100644 index 000000000000..4a6d68004920 --- /dev/null +++ b/docs/changelog/91413.yaml @@ -0,0 +1,5 @@ +pr: 91413 +summary: "[Fleet] Add files and files data index templates and ILM policies" +area: Infra/Plugins +type: feature +issues: [] diff --git a/docs/changelog/91425.yaml b/docs/changelog/91425.yaml deleted file mode 100644 index 3200fcc2655f..000000000000 --- a/docs/changelog/91425.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91425 -summary: Forecast write load during rollovers -area: Allocation -type: enhancement -issues: [] diff --git a/docs/changelog/91438.yaml b/docs/changelog/91438.yaml deleted file mode 100644 index a0dcb63c3712..000000000000 --- a/docs/changelog/91438.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 91438 -summary: Handle APM global labels as affix setting -area: Infra/Core -type: enhancement -issues: - - 91278 diff --git a/docs/changelog/91461.yaml b/docs/changelog/91461.yaml deleted file mode 100644 index 507e23e190b6..000000000000 --- a/docs/changelog/91461.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91461 -summary: Datastream unavailable exception metadata -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/91465.yaml b/docs/changelog/91465.yaml deleted file mode 100644 index fa8f78a897a4..000000000000 --- a/docs/changelog/91465.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91465 -summary: Support synthetic `_source` for `_doc_count` field -area: TSDB -type: enhancement -issues: [] diff --git a/docs/changelog/91467.yaml b/docs/changelog/91467.yaml new file mode 100644 index 000000000000..ebf7d2fcd248 --- /dev/null +++ b/docs/changelog/91467.yaml @@ -0,0 +1,5 @@ +pr: 91467 +summary: Settings api for stable plugins +area: Infra/Plugins +type: enhancement +issues: [] diff --git a/docs/changelog/91490.yaml b/docs/changelog/91490.yaml deleted file mode 100644 index a25d1312deed..000000000000 --- a/docs/changelog/91490.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91490 -summary: Avoid potential unsupported operation exception in doc bitset cache -area: Authorization -type: bug -issues: [] diff --git a/docs/changelog/91492.yaml b/docs/changelog/91492.yaml deleted file mode 100644 index 739b3fa68ab2..000000000000 --- a/docs/changelog/91492.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91492 -summary: Reduce number of object allocations in H3#geoToH3 and speed up computations -area: Geo -type: enhancement -issues: [] diff --git 
a/docs/changelog/91499.yaml b/docs/changelog/91499.yaml new file mode 100644 index 000000000000..600c0f27cedd --- /dev/null +++ b/docs/changelog/91499.yaml @@ -0,0 +1,5 @@ +pr: 91499 +summary: Extra `kibana_system` privileges for Fleet transform upgrades +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/91501.yaml b/docs/changelog/91501.yaml deleted file mode 100644 index 054401a090b8..000000000000 --- a/docs/changelog/91501.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 91501 -summary: Fix compile with hex literals ending with d/f -area: Infra/Scripting -type: bug -issues: - - 88614 diff --git a/docs/changelog/91506.yaml b/docs/changelog/91506.yaml deleted file mode 100644 index 09263d2b7ec1..000000000000 --- a/docs/changelog/91506.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91506 -summary: SLM uneahlthy policies diagnosis recommends correct URL in action -area: Health -type: bug -issues: [] diff --git a/docs/changelog/91508.yaml b/docs/changelog/91508.yaml new file mode 100644 index 000000000000..33356fa3da11 --- /dev/null +++ b/docs/changelog/91508.yaml @@ -0,0 +1,5 @@ +pr: 91508 +summary: "Add kibana.stats.elasticsearch_client stats to the monitoring index templates." +area: Monitoring +type: enhancement +issues: [] diff --git a/docs/changelog/91510.yaml b/docs/changelog/91510.yaml deleted file mode 100644 index 06bbf0bc30eb..000000000000 --- a/docs/changelog/91510.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91510 -summary: Refine bwc version checks on `EqlSearchRequest` -area: EQL -type: bug -issues: [] diff --git a/docs/changelog/91515.yaml b/docs/changelog/91515.yaml deleted file mode 100644 index 57229217f979..000000000000 --- a/docs/changelog/91515.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 91515 -summary: Use chunked encoding for `RestGetHealthAction` -area: Health -type: feature -issues: - - 90223 diff --git a/docs/changelog/91528.yaml b/docs/changelog/91528.yaml deleted file mode 100644 index 7415220c923f..000000000000 --- a/docs/changelog/91528.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91528 -summary: Expose telemetry about search usage -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/91546.yaml b/docs/changelog/91546.yaml deleted file mode 100644 index 9dbb77ab15f2..000000000000 --- a/docs/changelog/91546.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 91546 -summary: Copy more settings when creating DF analytics destination index -area: Machine Learning -type: bug -issues: - - 89795 diff --git a/docs/changelog/91556.yaml b/docs/changelog/91556.yaml deleted file mode 100644 index 44d08d58730a..000000000000 --- a/docs/changelog/91556.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91556 -summary: Allow plugins to wrap Lucene directories created by the `IndexModule` -area: Store -type: enhancement -issues: [] diff --git a/docs/changelog/91561.yaml b/docs/changelog/91561.yaml deleted file mode 100644 index 91662e5ca1a9..000000000000 --- a/docs/changelog/91561.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91561 -summary: Forecast average shard size during rollovers -area: Allocation -type: enhancement -issues: [] diff --git a/docs/changelog/91567.yaml b/docs/changelog/91567.yaml deleted file mode 100644 index c3eeb4dc0302..000000000000 --- a/docs/changelog/91567.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 91567 -summary: Refactor `DatabaseNodeService` as a cluster state listener -area: Ingest Node -type: bug -issues: - - 86999 diff --git a/docs/changelog/91587.yaml b/docs/changelog/91587.yaml deleted file mode 100644 index f9c22f961738..000000000000 --- 
a/docs/changelog/91587.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91587 -summary: "[HealthAPI] Use the `RestCancellableNodeClient` infrastructure" -area: Health -type: feature -issues: [] diff --git a/docs/changelog/91590.yaml b/docs/changelog/91590.yaml deleted file mode 100644 index fb02f3d260bb..000000000000 --- a/docs/changelog/91590.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 91590 -summary: Clear up forecasted write load and shard size from previous write index during - rollovers -area: Allocation -type: enhancement -issues: [] diff --git a/docs/changelog/91603.yaml b/docs/changelog/91603.yaml deleted file mode 100644 index 8406c6d2fb22..000000000000 --- a/docs/changelog/91603.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91603 -summary: Improve shard balancing -area: Allocation -type: enhancement -issues: [] diff --git a/docs/changelog/91622.yaml b/docs/changelog/91622.yaml deleted file mode 100644 index f105f27d95c6..000000000000 --- a/docs/changelog/91622.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 91622 -summary: Fix failure when resolving indices from CCS -area: Transform -type: bug -issues: - - 91550 diff --git a/docs/changelog/91640.yaml b/docs/changelog/91640.yaml new file mode 100644 index 000000000000..e581b03d523c --- /dev/null +++ b/docs/changelog/91640.yaml @@ -0,0 +1,5 @@ +pr: 91640 +summary: Add profiling plugin +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/91659.yaml b/docs/changelog/91659.yaml deleted file mode 100644 index 80cc3f5bb5d7..000000000000 --- a/docs/changelog/91659.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 91659 -summary: Avoid NPE when disassociateDeadNodes is executed for a node present in the - desired balance -area: Allocation -type: bug -issues: [] diff --git a/docs/changelog/91673.yaml b/docs/changelog/91673.yaml new file mode 100644 index 000000000000..86fb9f487658 --- /dev/null +++ b/docs/changelog/91673.yaml @@ -0,0 +1,5 @@ +pr: 91673 +summary: "Add new H3 api method #h3ToNoChildrenIntersecting" +area: Geo +type: enhancement +issues: [] diff --git a/docs/changelog/91708.yaml b/docs/changelog/91708.yaml new file mode 100644 index 000000000000..a4b9e0817508 --- /dev/null +++ b/docs/changelog/91708.yaml @@ -0,0 +1,6 @@ +pr: 91708 +summary: Expose Health Api telemetry via xpack +area: Health +type: enhancement +issues: + - 90877 diff --git a/docs/changelog/91710.yaml b/docs/changelog/91710.yaml deleted file mode 100644 index c52359d86fd8..000000000000 --- a/docs/changelog/91710.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 91710 -summary: Support fields with commas in data frame analytics `analyzed_fields` -area: Machine Learning -type: bug -issues: - - 72541 diff --git a/docs/changelog/91713.yaml b/docs/changelog/91713.yaml new file mode 100644 index 000000000000..c5fe0b73719b --- /dev/null +++ b/docs/changelog/91713.yaml @@ -0,0 +1,6 @@ +pr: 91713 +summary: Fix NPE when method was called on an array type +area: Infra/Scripting +type: bug +issues: + - 87562 diff --git a/docs/changelog/91722.yaml b/docs/changelog/91722.yaml new file mode 100644 index 000000000000..ff403d33f7e4 --- /dev/null +++ b/docs/changelog/91722.yaml @@ -0,0 +1,5 @@ +pr: 91722 +summary: Consolidate google-oauth-client to latest version +area: Snapshot/Restore +type: upgrade +issues: [] diff --git a/docs/changelog/91725.yaml b/docs/changelog/91725.yaml deleted file mode 100644 index d36ad5fe921f..000000000000 --- a/docs/changelog/91725.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 91725 -summary: Align all usages of Jackson to be 2.14.0 -area: Infra/REST API -type: upgrade -issues: 
[] diff --git a/docs/changelog/91730.yaml b/docs/changelog/91730.yaml new file mode 100644 index 000000000000..3db9189da486 --- /dev/null +++ b/docs/changelog/91730.yaml @@ -0,0 +1,6 @@ +pr: 91730 +summary: More accurate total ingest stats +area: Ingest Node +type: bug +issues: + - 91358 diff --git a/docs/changelog/91769.yaml b/docs/changelog/91769.yaml new file mode 100644 index 000000000000..02fb056ad2ea --- /dev/null +++ b/docs/changelog/91769.yaml @@ -0,0 +1,6 @@ +pr: 91769 +summary: Fix synthetic `_source` for sparse `_doc_count` field +area: TSDB +type: bug +issues: + - 91731 diff --git a/docs/changelog/91780.yaml b/docs/changelog/91780.yaml new file mode 100644 index 000000000000..1cab6b9c2960 --- /dev/null +++ b/docs/changelog/91780.yaml @@ -0,0 +1,5 @@ +pr: 91780 +summary: Check stable plugin version at install and load time +area: Infra/Plugins +type: enhancement +issues: [] diff --git a/docs/changelog/91781.yaml b/docs/changelog/91781.yaml new file mode 100644 index 000000000000..7513ec86baf4 --- /dev/null +++ b/docs/changelog/91781.yaml @@ -0,0 +1,5 @@ +pr: 91781 +summary: JWT realm - Initial support for access tokens +area: Authentication +type: enhancement +issues: [] diff --git a/docs/changelog/91783.yaml b/docs/changelog/91783.yaml new file mode 100644 index 000000000000..02f3dd5b10c1 --- /dev/null +++ b/docs/changelog/91783.yaml @@ -0,0 +1,5 @@ +pr: 91783 +summary: "EQL Samples: add support for multiple samples per key" +area: EQL +type: enhancement +issues: [] diff --git a/docs/changelog/91802.yaml b/docs/changelog/91802.yaml new file mode 100644 index 000000000000..9432ac4bab64 --- /dev/null +++ b/docs/changelog/91802.yaml @@ -0,0 +1,5 @@ +pr: 91802 +summary: Fix potential leak in `RemoteRecoveryHandler` +area: Recovery +type: bug +issues: [] diff --git a/docs/changelog/91839.yaml b/docs/changelog/91839.yaml new file mode 100644 index 000000000000..205319b11023 --- /dev/null +++ b/docs/changelog/91839.yaml @@ -0,0 +1,5 @@ +pr: 91839 +summary: Speed h3 library by using `FastMath` implementation for trigonometric functions +area: Geo +type: enhancement +issues: [] diff --git a/docs/changelog/91846.yaml b/docs/changelog/91846.yaml new file mode 100644 index 000000000000..3c9db04c8337 --- /dev/null +++ b/docs/changelog/91846.yaml @@ -0,0 +1,5 @@ +pr: 91846 +summary: Upgrade to Netty 4.1.85 +area: Network +type: upgrade +issues: [] diff --git a/docs/changelog/91869.yaml b/docs/changelog/91869.yaml new file mode 100644 index 000000000000..1e38cfd9dd6a --- /dev/null +++ b/docs/changelog/91869.yaml @@ -0,0 +1,5 @@ +pr: 91869 +summary: Load stable plugins as synthetic modules +area: Infra/Plugins +type: enhancement +issues: [] diff --git a/docs/changelog/91873.yaml b/docs/changelog/91873.yaml new file mode 100644 index 000000000000..3f2fb776bb63 --- /dev/null +++ b/docs/changelog/91873.yaml @@ -0,0 +1,5 @@ +pr: 91873 +summary: Record timestamp on API key invalidation +area: Security +type: enhancement +issues: [] diff --git a/docs/changelog/91886.yaml b/docs/changelog/91886.yaml new file mode 100644 index 000000000000..b32ac9e5b5d0 --- /dev/null +++ b/docs/changelog/91886.yaml @@ -0,0 +1,5 @@ +pr: 91886 +summary: Pre-authorize child search transport actions +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/91925.yaml b/docs/changelog/91925.yaml new file mode 100644 index 000000000000..731ce402d98f --- /dev/null +++ b/docs/changelog/91925.yaml @@ -0,0 +1,5 @@ +pr: 91925 +summary: Secure settings that can fall back to yml in Stateless +area: 
Distributed +type: feature +issues: [] diff --git a/docs/changelog/91956.yaml b/docs/changelog/91956.yaml new file mode 100644 index 000000000000..4df8fa057dcc --- /dev/null +++ b/docs/changelog/91956.yaml @@ -0,0 +1,5 @@ +pr: 91956 +summary: Geohex aggregation on `geo_shape` field +area: Geo +type: feature +issues: [] diff --git a/docs/changelog/92017.yaml b/docs/changelog/92017.yaml new file mode 100644 index 000000000000..84016910b258 --- /dev/null +++ b/docs/changelog/92017.yaml @@ -0,0 +1,5 @@ +pr: 92017 +summary: Add commits listener for `InternalEngine` and `CombinedDeletionPolicy` +area: Engine +type: enhancement +issues: [] diff --git a/docs/changelog/92025.yaml b/docs/changelog/92025.yaml new file mode 100644 index 000000000000..a1ce0d2ee863 --- /dev/null +++ b/docs/changelog/92025.yaml @@ -0,0 +1,6 @@ +pr: 92025 +summary: Use primitive types rather than boxing/unboxing for iterating over primitive + arrays from defs +area: Infra/Scripting +type: enhancement +issues: [] diff --git a/docs/changelog/92087.yaml b/docs/changelog/92087.yaml new file mode 100644 index 000000000000..14faa9abd005 --- /dev/null +++ b/docs/changelog/92087.yaml @@ -0,0 +1,5 @@ +pr: 92087 +summary: Use all profiling events on startup +area: Search +type: bug +issues: [] diff --git a/docs/changelog/92099.yaml b/docs/changelog/92099.yaml new file mode 100644 index 000000000000..4b50e6caf7b2 --- /dev/null +++ b/docs/changelog/92099.yaml @@ -0,0 +1,6 @@ +pr: 92099 +summary: Add methods to prevent allocating long arrays during child navigation on + H3 api +area: Geo +type: enhancement +issues: [] diff --git a/docs/changelog/92101.yaml b/docs/changelog/92101.yaml new file mode 100644 index 000000000000..509166d7330f --- /dev/null +++ b/docs/changelog/92101.yaml @@ -0,0 +1,5 @@ +pr: 92101 +summary: Add primary term supplier to Engine.IndexCommitListener +area: Engine +type: enhancement +issues: [] diff --git a/docs/changelog/92102.yaml b/docs/changelog/92102.yaml new file mode 100644 index 000000000000..99a1cbcc4b38 --- /dev/null +++ b/docs/changelog/92102.yaml @@ -0,0 +1,5 @@ +pr: 92102 +summary: Short-circuit painless def equality +area: Infra/Scripting +type: enhancement +issues: [] diff --git a/docs/changelog/92104.yaml b/docs/changelog/92104.yaml new file mode 100644 index 000000000000..19c66c14773d --- /dev/null +++ b/docs/changelog/92104.yaml @@ -0,0 +1,5 @@ +pr: 92104 +summary: Upgrading tika to 2.6.0 +area: Ingest Node +type: upgrade +issues: [] diff --git a/docs/changelog/92115.yaml b/docs/changelog/92115.yaml new file mode 100644 index 000000000000..d8f9defebb67 --- /dev/null +++ b/docs/changelog/92115.yaml @@ -0,0 +1,5 @@ +pr: 92115 +summary: Enable bloom filter for `_id` field in tsdb indices +area: TSDB +type: enhancement +issues: [] diff --git a/docs/changelog/92118.yaml b/docs/changelog/92118.yaml new file mode 100644 index 000000000000..a59cac45cb2c --- /dev/null +++ b/docs/changelog/92118.yaml @@ -0,0 +1,14 @@ +pr: 92118 +summary: Allow more than one KNN search clause +area: Vector Search +type: enhancement +issues: + - 91187 +highlight: + title: Allow more than one KNN search clause + body: "Some vector search scenarios require relevance ranking using a few kNN clauses,\n\ + e.g. when ranking based on several fields, each with its own vector, or when a document \n\ + includes a vector for the image and another vector for the text. 
The user may want to obtain\n\ + relevance ranking based on a combination of all of these kNN clauses.\n\ncloses\ + \ https://github.com/elastic/elasticsearch/issues/91187" + notable: true diff --git a/docs/changelog/92123.yaml b/docs/changelog/92123.yaml new file mode 100644 index 000000000000..d124315711f2 --- /dev/null +++ b/docs/changelog/92123.yaml @@ -0,0 +1,5 @@ +pr: 92123 +summary: Align all usages of protobuf to be 3.21.9 +area: Snapshot/Restore +type: upgrade +issues: [] diff --git a/docs/changelog/92171.yaml b/docs/changelog/92171.yaml new file mode 100644 index 000000000000..fa98bc20d576 --- /dev/null +++ b/docs/changelog/92171.yaml @@ -0,0 +1,5 @@ +pr: 92171 +summary: Forwarding simulate calls to ingest nodes +area: Ingest Node +type: bug +issues: [] diff --git a/docs/changelog/92197.yaml b/docs/changelog/92197.yaml new file mode 100644 index 000000000000..a28740217277 --- /dev/null +++ b/docs/changelog/92197.yaml @@ -0,0 +1,5 @@ +pr: 92197 +summary: Optimize composite agg with leading global ordinal value source +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/92199.yaml b/docs/changelog/92199.yaml new file mode 100644 index 000000000000..af71777f1ff9 --- /dev/null +++ b/docs/changelog/92199.yaml @@ -0,0 +1,5 @@ +pr: 92199 +summary: Expose tier balancing stats via internal endpoint +area: "Allocation" +type: enhancement +issues: [] diff --git a/docs/changelog/92204.yaml b/docs/changelog/92204.yaml new file mode 100644 index 000000000000..ca0b8ff7e738 --- /dev/null +++ b/docs/changelog/92204.yaml @@ -0,0 +1,5 @@ +pr: 92204 +summary: Free resources correctly when model loading is cancelled +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/92213.yaml b/docs/changelog/92213.yaml new file mode 100644 index 000000000000..6cbde4e15bf0 --- /dev/null +++ b/docs/changelog/92213.yaml @@ -0,0 +1,5 @@ +pr: 92213 +summary: Make native inference generally available +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/92219.yaml b/docs/changelog/92219.yaml new file mode 100644 index 000000000000..2b889a9c3210 --- /dev/null +++ b/docs/changelog/92219.yaml @@ -0,0 +1,5 @@ +pr: 92219 +summary: Configurable retention period for invalidated or expired API keys +area: Security +type: enhancement +issues: [] diff --git a/docs/changelog/92232.yaml b/docs/changelog/92232.yaml new file mode 100644 index 000000000000..0ace8ab75f8f --- /dev/null +++ b/docs/changelog/92232.yaml @@ -0,0 +1,6 @@ +pr: 92232 +summary: Better error when `aggregate_metric_double` used in scrolling datafeeds +area: Machine Learning +type: enhancement +issues: + - 90592 diff --git a/docs/changelog/92238.yaml b/docs/changelog/92238.yaml new file mode 100644 index 000000000000..c6d5128452bc --- /dev/null +++ b/docs/changelog/92238.yaml @@ -0,0 +1,5 @@ +pr: 92238 +summary: Diff the list of filenames that are added by each new commit +area: Engine +type: enhancement +issues: [] diff --git a/docs/changelog/92239.yaml b/docs/changelog/92239.yaml new file mode 100644 index 000000000000..7cb621973752 --- /dev/null +++ b/docs/changelog/92239.yaml @@ -0,0 +1,5 @@ +pr: 92239 +summary: Stop the `frequent_items` aggregation reporting a subset when a superset exists +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/92252.yaml b/docs/changelog/92252.yaml new file mode 100644 index 000000000000..5c45a46a5d3a --- /dev/null +++ b/docs/changelog/92252.yaml @@ -0,0 +1,5 @@ +pr: 92252 +summary: Preventing ILM and SLM runtime state from being 
stored in a snapshot +area: ILM+SLM +type: bug +issues: [] diff --git a/docs/changelog/92255.yaml b/docs/changelog/92255.yaml new file mode 100644 index 000000000000..abbb68ce4903 --- /dev/null +++ b/docs/changelog/92255.yaml @@ -0,0 +1,5 @@ +pr: 92255 +summary: Avoid capturing cluster state in TBbNA +area: Stats +type: bug +issues: [] diff --git a/docs/changelog/92269.yaml b/docs/changelog/92269.yaml new file mode 100644 index 000000000000..5e1381248bb8 --- /dev/null +++ b/docs/changelog/92269.yaml @@ -0,0 +1,5 @@ +pr: 92269 +summary: Access term dictionary more efficiently +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/92276.yaml b/docs/changelog/92276.yaml new file mode 100644 index 000000000000..0727c512496a --- /dev/null +++ b/docs/changelog/92276.yaml @@ -0,0 +1,5 @@ +pr: 92276 +summary: Minor TSDB parsing speedup +area: TSDB +type: enhancement +issues: [] diff --git a/docs/changelog/92296.yaml b/docs/changelog/92296.yaml new file mode 100644 index 000000000000..75ecd5885edb --- /dev/null +++ b/docs/changelog/92296.yaml @@ -0,0 +1,6 @@ +pr: 92296 +summary: "[HealthAPI] Add support for the FEATURE_STATE affected resource" +area: Health +type: feature +issues: + - 91353 diff --git a/docs/changelog/92303.yaml b/docs/changelog/92303.yaml new file mode 100644 index 000000000000..beb3a0580a3b --- /dev/null +++ b/docs/changelog/92303.yaml @@ -0,0 +1,5 @@ +pr: 92303 +summary: Add `forecasted_write_load` and `forecasted_shard_size_in_bytes` to the endpoint +area: Allocation +type: enhancement +issues: [] diff --git a/docs/changelog/92309.yaml b/docs/changelog/92309.yaml new file mode 100644 index 000000000000..e024ca73f56a --- /dev/null +++ b/docs/changelog/92309.yaml @@ -0,0 +1,5 @@ +pr: 92309 +summary: Increase the number of threads of GET threadpool +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/92314.yaml b/docs/changelog/92314.yaml new file mode 100644 index 000000000000..1de076290588 --- /dev/null +++ b/docs/changelog/92314.yaml @@ -0,0 +1,5 @@ +pr: 92314 +summary: JWT realm - add support for required claims +area: Authentication +type: enhancement +issues: [] diff --git a/docs/changelog/92315.yaml b/docs/changelog/92315.yaml new file mode 100644 index 000000000000..9180467f5db0 --- /dev/null +++ b/docs/changelog/92315.yaml @@ -0,0 +1,5 @@ +pr: 92315 +summary: JWT realm - Simplify token principal calculation +area: Authentication +type: enhancement +issues: [] diff --git a/docs/changelog/92322.yaml b/docs/changelog/92322.yaml new file mode 100644 index 000000000000..3b7d93fc6a4e --- /dev/null +++ b/docs/changelog/92322.yaml @@ -0,0 +1,5 @@ +pr: 92322 +summary: implement extension pruning in frequent items to improve runtime +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/92328.yaml b/docs/changelog/92328.yaml new file mode 100644 index 000000000000..e1cfd9ef955a --- /dev/null +++ b/docs/changelog/92328.yaml @@ -0,0 +1,5 @@ +pr: 92328 +summary: Increase the default timeout for the start trained model deployment API +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/92329.yaml b/docs/changelog/92329.yaml new file mode 100644 index 000000000000..5eab808b7ca9 --- /dev/null +++ b/docs/changelog/92329.yaml @@ -0,0 +1,5 @@ +pr: 92329 +summary: Fix tokenization bug when handling normalization in BERT and MPNet +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/92334.yaml b/docs/changelog/92334.yaml new file mode 100644 index 000000000000..299a7e172a14 
--- /dev/null +++ b/docs/changelog/92334.yaml @@ -0,0 +1,5 @@ +pr: 92334 +summary: Example stable plugins with settings +area: Infra/Plugins +type: enhancement +issues: [] diff --git a/docs/changelog/92335.yaml b/docs/changelog/92335.yaml new file mode 100644 index 000000000000..9dc21fdcdc51 --- /dev/null +++ b/docs/changelog/92335.yaml @@ -0,0 +1,6 @@ +pr: 92335 +summary: Download the geoip databases only when needed +area: Ingest Node +type: bug +issues: + - 90673 diff --git a/docs/changelog/92340.yaml b/docs/changelog/92340.yaml new file mode 100644 index 000000000000..3ed47c66452b --- /dev/null +++ b/docs/changelog/92340.yaml @@ -0,0 +1,5 @@ +pr: 92340 +summary: Add vector distance scoring to micro benchmarks +area: Performance +type: enhancement +issues: [] diff --git a/docs/changelog/92360.yaml b/docs/changelog/92360.yaml new file mode 100644 index 000000000000..7f389646f7e7 --- /dev/null +++ b/docs/changelog/92360.yaml @@ -0,0 +1,5 @@ +pr: 92360 +summary: Fix missing override for matches in `ProfileWeight` +area: Search +type: bug +issues: [] diff --git a/docs/changelog/92372.yaml b/docs/changelog/92372.yaml new file mode 100644 index 000000000000..7c882b22236e --- /dev/null +++ b/docs/changelog/92372.yaml @@ -0,0 +1,16 @@ +pr: 92372 +summary: Speed up ingest geoip processors +area: Ingest Node +type: bug +issues: [] +highlight: + title: Speed up ingest geoip processors + body: |- + The `geoip` ingest processor is significantly faster. + + Previous versions of the geoip library needed special permission to execute + databinding code, requiring an expensive permissions check and + `AccessController.doPrivileged` call. The current version of the geoip + library no longer requires that, however, so the expensive code has been + removed, resulting in better performance for the ingest geoip processor. + notable: true diff --git a/docs/changelog/92380.yaml b/docs/changelog/92380.yaml new file mode 100644 index 000000000000..73a1af2a56b5 --- /dev/null +++ b/docs/changelog/92380.yaml @@ -0,0 +1,5 @@ +pr: 92380 +summary: Runtime fields to optionally ignore script errors +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/92382.yaml b/docs/changelog/92382.yaml new file mode 100644 index 000000000000..6621307ae4fb --- /dev/null +++ b/docs/changelog/92382.yaml @@ -0,0 +1,5 @@ +pr: 92382 +summary: Add log level for JVM logs +area: Infra/Core +type: enhancement +issues: [] diff --git a/docs/changelog/92387.yaml b/docs/changelog/92387.yaml new file mode 100644 index 000000000000..558a94169c93 --- /dev/null +++ b/docs/changelog/92387.yaml @@ -0,0 +1,6 @@ +pr: 92387 +summary: Add `jdk.internal.reflect` permission to es codebase +area: Infra/Core +type: bug +issues: + - 92356 diff --git a/docs/changelog/92395.yaml b/docs/changelog/92395.yaml new file mode 100644 index 000000000000..11d0ecdf758e --- /dev/null +++ b/docs/changelog/92395.yaml @@ -0,0 +1,11 @@ +pr: 92395 +summary: Speed up ingest set and append processors +area: Ingest Node +type: bug +issues: [] +highlight: + title: Speed up ingest set and append processors + body: |- + `set` and `append` ingest processors that use mustache templates are + significantly faster. 
+ notable: true diff --git a/docs/changelog/92399.yaml b/docs/changelog/92399.yaml new file mode 100644 index 000000000000..027f3ac85969 --- /dev/null +++ b/docs/changelog/92399.yaml @@ -0,0 +1,7 @@ +pr: 92399 +summary: "[HealthAPI] Add size parameter that controls the number of affected resources\ + \ returned" +area: Health +type: feature +issues: + - 91930 diff --git a/docs/changelog/92414.yaml b/docs/changelog/92414.yaml new file mode 100644 index 000000000000..c062122833c4 --- /dev/null +++ b/docs/changelog/92414.yaml @@ -0,0 +1,5 @@ +pr: 92414 +summary: Add the ability to include and exclude values in Frequent items +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/92417.yaml b/docs/changelog/92417.yaml new file mode 100644 index 000000000000..b766fe0384f1 --- /dev/null +++ b/docs/changelog/92417.yaml @@ -0,0 +1,5 @@ +pr: 92417 +summary: Improve anomaly detection results indexing speed +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/92422.yaml b/docs/changelog/92422.yaml new file mode 100644 index 000000000000..5d77e03e2eb8 --- /dev/null +++ b/docs/changelog/92422.yaml @@ -0,0 +1,5 @@ +pr: 92422 +summary: Make `FilterStreamInput` less trappy +area: Infra/Core +type: bug +issues: [] diff --git a/docs/changelog/92428.yaml b/docs/changelog/92428.yaml new file mode 100644 index 000000000000..dda2ff132feb --- /dev/null +++ b/docs/changelog/92428.yaml @@ -0,0 +1,5 @@ +pr: 92428 +summary: Introduce parameterized rule and executor +area: Query Languages +type: enhancement +issues: [] diff --git a/docs/changelog/92436.yaml b/docs/changelog/92436.yaml new file mode 100644 index 000000000000..1f8b4a9bf187 --- /dev/null +++ b/docs/changelog/92436.yaml @@ -0,0 +1,6 @@ +pr: 92436 +summary: Add `ignore_missing_component_templates` config option +area: Indices APIs +type: enhancement +issues: + - 92426 diff --git a/docs/changelog/92455.yaml b/docs/changelog/92455.yaml new file mode 100644 index 000000000000..e7f77c315e6b --- /dev/null +++ b/docs/changelog/92455.yaml @@ -0,0 +1,5 @@ +pr: 92455 +summary: Correctly handle an exception case for ingest failure +area: Ingest Node +type: bug +issues: [] diff --git a/docs/changelog/92456.yaml b/docs/changelog/92456.yaml new file mode 100644 index 000000000000..213f6e85870d --- /dev/null +++ b/docs/changelog/92456.yaml @@ -0,0 +1,5 @@ +pr: 92456 +summary: Skip duplicate checks on segments that don't contain the document's timestamp +area: TSDB +type: enhancement +issues: [] diff --git a/docs/changelog/92457.yaml b/docs/changelog/92457.yaml new file mode 100644 index 000000000000..e8a2e7207cad --- /dev/null +++ b/docs/changelog/92457.yaml @@ -0,0 +1,5 @@ +pr: 92457 +summary: Bump reactor netty version +area: Snapshot/Restore +type: upgrade +issues: [] diff --git a/docs/changelog/92460.yaml b/docs/changelog/92460.yaml new file mode 100644 index 000000000000..4fc47007ea8a --- /dev/null +++ b/docs/changelog/92460.yaml @@ -0,0 +1,5 @@ +pr: 92460 +summary: Check `GeohexGrid` bounds on geopoint using spherical coordinates +area: Geo +type: bug +issues: [] diff --git a/docs/changelog/92489.yaml b/docs/changelog/92489.yaml new file mode 100644 index 000000000000..e2f3c4cd19e1 --- /dev/null +++ b/docs/changelog/92489.yaml @@ -0,0 +1,5 @@ +pr: 92489 +summary: Make `RecoveryPlannerService` optional +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/docs/changelog/92490.yaml b/docs/changelog/92490.yaml new file mode 100644 index 000000000000..5f6f63f3fc27 --- /dev/null +++ 
b/docs/changelog/92490.yaml @@ -0,0 +1,5 @@ +pr: 92490 +summary: Make clean up files step configurable for peer-recovery of replicas +area: Recovery +type: enhancement +issues: [] diff --git a/docs/changelog/92528.yaml b/docs/changelog/92528.yaml new file mode 100644 index 000000000000..03d12eea5b0e --- /dev/null +++ b/docs/changelog/92528.yaml @@ -0,0 +1,5 @@ +pr: 92528 +summary: Scan stable plugins for named components upon install +area: Infra/CLI +type: enhancement +issues: [] diff --git a/docs/changelog/92546.yaml b/docs/changelog/92546.yaml new file mode 100644 index 000000000000..1549d512453e --- /dev/null +++ b/docs/changelog/92546.yaml @@ -0,0 +1,5 @@ +pr: 92546 +summary: Delay Connection#onRemoved while pending +area: Network +type: bug +issues: [] diff --git a/docs/changelog/92547.yaml b/docs/changelog/92547.yaml new file mode 100644 index 000000000000..2e39c93724f4 --- /dev/null +++ b/docs/changelog/92547.yaml @@ -0,0 +1,5 @@ +pr: 92547 +summary: Don't create a new `DoubleHistogram` instance for empty buckets +area: Aggregations +type: bug +issues: [] diff --git a/docs/changelog/92558.yaml b/docs/changelog/92558.yaml new file mode 100644 index 000000000000..f0621449fa20 --- /dev/null +++ b/docs/changelog/92558.yaml @@ -0,0 +1,6 @@ +pr: 92558 +summary: Protect `NodeConnectionsService` from stale conns +area: Network +type: bug +issues: + - 92029 diff --git a/docs/changelog/92586.yaml b/docs/changelog/92586.yaml new file mode 100644 index 000000000000..f6d0c58ade30 --- /dev/null +++ b/docs/changelog/92586.yaml @@ -0,0 +1,6 @@ +pr: 92586 +summary: "Grok returns a list of matches for repeated pattern names #92092" +area: Ingest Node +type: bug +issues: + - 92092 diff --git a/docs/changelog/92587.yaml b/docs/changelog/92587.yaml new file mode 100644 index 000000000000..bc6b6b5b6e8c --- /dev/null +++ b/docs/changelog/92587.yaml @@ -0,0 +1,5 @@ +pr: 92587 +summary: Upgrade to Netty 4.1.86 +area: Network +type: upgrade +issues: [] diff --git a/docs/changelog/92625.yaml b/docs/changelog/92625.yaml new file mode 100644 index 000000000000..232da48fc06d --- /dev/null +++ b/docs/changelog/92625.yaml @@ -0,0 +1,6 @@ +pr: 92625 +summary: Fix Security's expression resolver to not remove unavailable but authorized + names +area: Authorization +type: bug +issues: [] diff --git a/docs/changelog/92640.yaml b/docs/changelog/92640.yaml new file mode 100644 index 000000000000..8c2dc2415907 --- /dev/null +++ b/docs/changelog/92640.yaml @@ -0,0 +1,5 @@ +pr: 92640 +summary: Added new field `rollout_duration_seconds` to fleet-actions +area: Infra/Core +type: enhancement +issues: [] diff --git a/docs/changelog/92659.yaml b/docs/changelog/92659.yaml new file mode 100644 index 000000000000..4e90acbb1395 --- /dev/null +++ b/docs/changelog/92659.yaml @@ -0,0 +1,5 @@ +pr: 92659 +summary: Set a fixed compound file threshold of 1GB +area: Engine +type: enhancement +issues: [] diff --git a/docs/changelog/92668.yaml b/docs/changelog/92668.yaml new file mode 100644 index 000000000000..ffa45030ec67 --- /dev/null +++ b/docs/changelog/92668.yaml @@ -0,0 +1,5 @@ +pr: 92668 +summary: "Introduce ShardRouting.Role" +area: Allocation +type: enhancement +issues: [] diff --git a/docs/changelog/92680.yaml b/docs/changelog/92680.yaml new file mode 100644 index 000000000000..ebf8e2ff84b2 --- /dev/null +++ b/docs/changelog/92680.yaml @@ -0,0 +1,5 @@ +pr: 92680 +summary: Fix `InputStream#readAllBytes` on `InputStreamIndexInput` +area: Infra/Core +type: bug +issues: [] diff --git a/docs/changelog/92692.yaml 
b/docs/changelog/92692.yaml new file mode 100644 index 000000000000..eb6830498779 --- /dev/null +++ b/docs/changelog/92692.yaml @@ -0,0 +1,6 @@ +pr: 92692 +summary: Allow different filters per `DataStream` in a `DataStreamAlias` +area: Data streams +type: bug +issues: + - 92050 diff --git a/docs/changelog/92693.yaml b/docs/changelog/92693.yaml new file mode 100644 index 000000000000..c6d6fd90c64f --- /dev/null +++ b/docs/changelog/92693.yaml @@ -0,0 +1,6 @@ +pr: 92693 +summary: Delay master task failure notifications until commit +area: Cluster Coordination +type: bug +issues: + - 92677 diff --git a/docs/changelog/92695.yaml b/docs/changelog/92695.yaml new file mode 100644 index 000000000000..d9b404744448 --- /dev/null +++ b/docs/changelog/92695.yaml @@ -0,0 +1,5 @@ +pr: 92695 +summary: "[Rest Api Compatibility] Format response media type with parameters" +area: Infra/REST API +type: bug +issues: [] diff --git a/docs/changelog/92711.yaml b/docs/changelog/92711.yaml new file mode 100644 index 000000000000..9659c89f4194 --- /dev/null +++ b/docs/changelog/92711.yaml @@ -0,0 +1,5 @@ +pr: 92711 +summary: Add methods to H3#hexRing to prevent allocating long arrays +area: Geo +type: enhancement +issues: [] diff --git a/docs/changelog/92742.yaml b/docs/changelog/92742.yaml new file mode 100644 index 000000000000..ac63c5e226c9 --- /dev/null +++ b/docs/changelog/92742.yaml @@ -0,0 +1,5 @@ +pr: 92742 +summary: "Improve node-{join,left} logging for troubleshooting" +area: Cluster Coordination +type: enhancement +issues: [] diff --git a/docs/changelog/92744.yaml b/docs/changelog/92744.yaml new file mode 100644 index 000000000000..7c121d1d22f2 --- /dev/null +++ b/docs/changelog/92744.yaml @@ -0,0 +1,5 @@ +pr: 92744 +summary: Repeat `cluster.initial_master_nodes` log warning +area: Cluster Coordination +type: enhancement +issues: [] diff --git a/docs/changelog/92755.yaml b/docs/changelog/92755.yaml new file mode 100644 index 000000000000..8f52817d2a0e --- /dev/null +++ b/docs/changelog/92755.yaml @@ -0,0 +1,6 @@ +pr: 92755 +summary: Add links to troubleshooting docs +area: Cluster Coordination +type: enhancement +issues: + - 92741 diff --git a/docs/changelog/92762.yaml b/docs/changelog/92762.yaml new file mode 100644 index 000000000000..a5953ce3117f --- /dev/null +++ b/docs/changelog/92762.yaml @@ -0,0 +1,6 @@ +pr: 92762 +summary: Integrate "sourceHasChanged" call into failure handling and retry logic +area: Transform +type: bug +issues: + - 92133 diff --git a/docs/changelog/92777.yaml b/docs/changelog/92777.yaml new file mode 100644 index 000000000000..7159a258e897 --- /dev/null +++ b/docs/changelog/92777.yaml @@ -0,0 +1,5 @@ +pr: 92777 +summary: Reduce memory usage of match all bitset +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/92784.yaml b/docs/changelog/92784.yaml new file mode 100644 index 000000000000..06c8e767da60 --- /dev/null +++ b/docs/changelog/92784.yaml @@ -0,0 +1,6 @@ +pr: 92784 +summary: Update the version of asm used by plugin scanner +area: Infra/Plugins +type: bug +issues: + - 92782 diff --git a/docs/changelog/92787.yaml b/docs/changelog/92787.yaml new file mode 100644 index 000000000000..6e7339eac228 --- /dev/null +++ b/docs/changelog/92787.yaml @@ -0,0 +1,5 @@ +pr: 92787 +summary: Enable profiling plugin by default +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/92829.yaml b/docs/changelog/92829.yaml new file mode 100644 index 000000000000..69ad54928f89 --- /dev/null +++ b/docs/changelog/92829.yaml @@ -0,0 +1,5 @@ +pr: 92829 
+summary: Protect H3 library against integer overflow +area: Geo +type: enhancement +issues: [] diff --git a/docs/changelog/92863.yaml b/docs/changelog/92863.yaml new file mode 100644 index 000000000000..2d4a7f4e7a5e --- /dev/null +++ b/docs/changelog/92863.yaml @@ -0,0 +1,5 @@ +pr: 92863 +summary: Support retrieving inlined stack frames +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/92871.yaml b/docs/changelog/92871.yaml new file mode 100644 index 000000000000..f401f50e7461 --- /dev/null +++ b/docs/changelog/92871.yaml @@ -0,0 +1,5 @@ +pr: 92871 +summary: Support custom PBKDF2 password hashes +area: Authentication +type: enhancement +issues: [] diff --git a/docs/changelog/92879.yaml b/docs/changelog/92879.yaml new file mode 100644 index 000000000000..0e9d0d8ab6cc --- /dev/null +++ b/docs/changelog/92879.yaml @@ -0,0 +1,13 @@ +pr: 92879 +summary: The Health API is now generally available +area: Health +type: feature +issues: [] +highlight: + title: The Health API is now generally available + body: |- + Elasticsearch introduces a new Health API designed to report the health of + the cluster. The new API provides both a high level overview of the cluster + health, and a very detailed report that can include a precise diagnosis and + a resolution. + notable: true diff --git a/docs/changelog/92880.yaml b/docs/changelog/92880.yaml new file mode 100644 index 000000000000..5336987ee2cd --- /dev/null +++ b/docs/changelog/92880.yaml @@ -0,0 +1,5 @@ +pr: 92880 +summary: Cache the creation of parsers within DateProcessor +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/changelog/92890.yaml b/docs/changelog/92890.yaml new file mode 100644 index 000000000000..3bd38e6152e5 --- /dev/null +++ b/docs/changelog/92890.yaml @@ -0,0 +1,6 @@ +pr: 92890 +summary: Fix unclosed directory stream in `ClassReaders` +area: Infra/Plugins +type: bug +issues: + - 92866 diff --git a/docs/changelog/92920.yaml b/docs/changelog/92920.yaml new file mode 100644 index 000000000000..7c14ecd34ac2 --- /dev/null +++ b/docs/changelog/92920.yaml @@ -0,0 +1,6 @@ +pr: 92920 +summary: Annotated highlighter does not match when search contains both annotation and annotated term +area: Search +type: bug +issues: + - 91944 diff --git a/docs/changelog/92928.yaml b/docs/changelog/92928.yaml new file mode 100644 index 000000000000..0f86adf45d1b --- /dev/null +++ b/docs/changelog/92928.yaml @@ -0,0 +1,5 @@ +pr: 92928 +summary: Ensure one-shot wrappers release their delegates +area: Infra/Core +type: bug +issues: [] diff --git a/docs/changelog/92948.yaml b/docs/changelog/92948.yaml new file mode 100644 index 000000000000..a335e60035ce --- /dev/null +++ b/docs/changelog/92948.yaml @@ -0,0 +1,6 @@ +pr: 92948 +summary: Transform _schedule_now API +area: Transform +type: feature +issues: + - 44722 diff --git a/docs/changelog/92950.yaml b/docs/changelog/92950.yaml new file mode 100644 index 000000000000..464d56bbda2e --- /dev/null +++ b/docs/changelog/92950.yaml @@ -0,0 +1,5 @@ +pr: 92950 +summary: Add monitoring mappings for es ingest metricset +area: Monitoring +type: enhancement +issues: [] diff --git a/docs/changelog/92957.yaml b/docs/changelog/92957.yaml new file mode 100644 index 000000000000..0afc61ba9ec6 --- /dev/null +++ b/docs/changelog/92957.yaml @@ -0,0 +1,5 @@ +pr: 92957 +summary: Upgrade to lucene-9.5.0-snapshot-d19c3e2e0ed +area: Search +type: upgrade +issues: [] diff --git a/docs/changelog/92973.yaml b/docs/changelog/92973.yaml new file mode 100644 index 000000000000..c20df5ad55e6 --- 
/dev/null +++ b/docs/changelog/92973.yaml @@ -0,0 +1,5 @@ +pr: 92973 +summary: Fix indices resolver for datemath with colon +area: Infra/Core +type: bug +issues: [] diff --git a/docs/changelog/92976.yaml b/docs/changelog/92976.yaml new file mode 100644 index 000000000000..918606b124b8 --- /dev/null +++ b/docs/changelog/92976.yaml @@ -0,0 +1,6 @@ +pr: 92976 +summary: Report recovered files as recovered from snapshot for fully mounted searchable + snapshots +area: Recovery +type: bug +issues: [] diff --git a/docs/changelog/93000.yaml b/docs/changelog/93000.yaml new file mode 100644 index 000000000000..33c933e393b6 --- /dev/null +++ b/docs/changelog/93000.yaml @@ -0,0 +1,5 @@ +pr: 93000 +summary: Persist data counts and datafeed timing stats asynchronously +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/93084.yaml b/docs/changelog/93084.yaml new file mode 100644 index 000000000000..e6baa47190ff --- /dev/null +++ b/docs/changelog/93084.yaml @@ -0,0 +1,6 @@ +pr: 93084 +summary: "[H3] Compute destination point from distance and azimuth using planar 3d\ + \ math" +area: Geo +type: enhancement +issues: [] diff --git a/docs/changelog/93157.yaml b/docs/changelog/93157.yaml new file mode 100644 index 000000000000..02d816b7fe82 --- /dev/null +++ b/docs/changelog/93157.yaml @@ -0,0 +1,6 @@ +pr: 93157 +summary: Take into account `max_headroom` in disk watermark calculations +area: Health +type: bug +issues: + - 93155 diff --git a/docs/changelog/93165.yaml b/docs/changelog/93165.yaml new file mode 100644 index 000000000000..6b57f80c153e --- /dev/null +++ b/docs/changelog/93165.yaml @@ -0,0 +1,5 @@ +pr: 93165 +summary: Switch to Lucene's new `IntField/LongField/FloatField/DoubleField` +area: Mapping +type: enhancement +issues: [] diff --git a/docs/changelog/93177.yaml b/docs/changelog/93177.yaml new file mode 100644 index 000000000000..6a539f5372e4 --- /dev/null +++ b/docs/changelog/93177.yaml @@ -0,0 +1,5 @@ +pr: 93177 +summary: Fix geo ip database file leak when processing IP arrays +area: Ingest Node +type: bug +issues: [] diff --git a/docs/changelog/93178.yaml b/docs/changelog/93178.yaml new file mode 100644 index 000000000000..1622c9d98669 --- /dev/null +++ b/docs/changelog/93178.yaml @@ -0,0 +1,6 @@ +pr: 93178 +summary: Restore printing bootstrap checks as errors +area: Infra/CLI +type: bug +issues: + - 93074 diff --git a/docs/changelog/93179.yaml b/docs/changelog/93179.yaml new file mode 100644 index 000000000000..bb18523d630d --- /dev/null +++ b/docs/changelog/93179.yaml @@ -0,0 +1,6 @@ +pr: 93179 +summary: Making `JsonProcessor` stricter so that it does not silently drop data +area: Ingest Node +type: bug +issues: + - 92898 diff --git a/docs/changelog/93188.yaml b/docs/changelog/93188.yaml new file mode 100644 index 000000000000..bd4f47f74853 --- /dev/null +++ b/docs/changelog/93188.yaml @@ -0,0 +1,5 @@ +pr: 93188 +summary: Adjust range of allowed percentages of deletes in an index +area: Engine +type: enhancement +issues: [] diff --git a/docs/changelog/93203.yaml b/docs/changelog/93203.yaml new file mode 100644 index 000000000000..771c07d40190 --- /dev/null +++ b/docs/changelog/93203.yaml @@ -0,0 +1,5 @@ +pr: 93203 +summary: Support "offset" parameter in `DateHistogramGroupSource` +area: Transform +type: enhancement +issues: [] diff --git a/docs/changelog/93210.yaml b/docs/changelog/93210.yaml new file mode 100644 index 000000000000..179f4ab9dec8 --- /dev/null +++ b/docs/changelog/93210.yaml @@ -0,0 +1,5 @@ +pr: 93210 +summary: Unpromotables skip replication 
and peer recovery +area: Allocation +type: enhancement +issues: [] diff --git a/docs/changelog/93221.yaml b/docs/changelog/93221.yaml new file mode 100644 index 000000000000..7ee858d7718c --- /dev/null +++ b/docs/changelog/93221.yaml @@ -0,0 +1,5 @@ +pr: 93221 +summary: Trigger state persistence based on time +area: Transform +type: enhancement +issues: [] diff --git a/docs/changelog/93238.yaml b/docs/changelog/93238.yaml new file mode 100644 index 000000000000..711b81d529b7 --- /dev/null +++ b/docs/changelog/93238.yaml @@ -0,0 +1,5 @@ +pr: 93238 +summary: "Upgrade antlr to 4.11.1 for ql, eql and sql" +area: Query Languages +type: upgrade +issues: [] diff --git a/docs/changelog/93247.yaml b/docs/changelog/93247.yaml new file mode 100644 index 000000000000..e32d2acde270 --- /dev/null +++ b/docs/changelog/93247.yaml @@ -0,0 +1,5 @@ +pr: 93247 +summary: Add `term` query support to `rank_features` mapped field +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/93255.yaml b/docs/changelog/93255.yaml new file mode 100644 index 000000000000..1492a5b7155d --- /dev/null +++ b/docs/changelog/93255.yaml @@ -0,0 +1,5 @@ +pr: 93255 +summary: Improve frequent items runtime +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/93276.yaml b/docs/changelog/93276.yaml new file mode 100644 index 000000000000..be00ea97912d --- /dev/null +++ b/docs/changelog/93276.yaml @@ -0,0 +1,7 @@ +pr: 93276 +summary: -| + Correctly remove domain from realm when rewriting `Authentication` for compatibility with node versions that don't + support domains +area: Authentication +type: bug +issues: [ ] diff --git a/docs/changelog/93285.yaml b/docs/changelog/93285.yaml new file mode 100644 index 000000000000..2a539a132a45 --- /dev/null +++ b/docs/changelog/93285.yaml @@ -0,0 +1,5 @@ +pr: 93285 +summary: Make `GeoIpProcessor` backing database instance pluggable +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/changelog/93299.yaml b/docs/changelog/93299.yaml new file mode 100644 index 000000000000..13c6e64b8f1d --- /dev/null +++ b/docs/changelog/93299.yaml @@ -0,0 +1,6 @@ +pr: 93299 +summary: No length check for source-only keyword fields +area: Mapping +type: bug +issues: + - 9304 diff --git a/docs/changelog/93304.yaml b/docs/changelog/93304.yaml new file mode 100644 index 000000000000..e9d05b8c1b7e --- /dev/null +++ b/docs/changelog/93304.yaml @@ -0,0 +1,5 @@ +pr: 93304 +summary: Improve `frequent_items` performance using global ordinals +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/93306.yaml b/docs/changelog/93306.yaml new file mode 100644 index 000000000000..6857c777fb79 --- /dev/null +++ b/docs/changelog/93306.yaml @@ -0,0 +1,5 @@ +pr: 93306 +summary: Allowed indices matcher supports nested limited roles +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/93324.yaml b/docs/changelog/93324.yaml new file mode 100644 index 000000000000..12d212bddf48 --- /dev/null +++ b/docs/changelog/93324.yaml @@ -0,0 +1,6 @@ +pr: 93324 +summary: Fix data counts race condition when starting a datafeed +area: Machine Learning +type: bug +issues: + - 93298 diff --git a/docs/changelog/93329.yaml b/docs/changelog/93329.yaml new file mode 100644 index 000000000000..4585edad9b6c --- /dev/null +++ b/docs/changelog/93329.yaml @@ -0,0 +1,20 @@ +pr: 93329 +summary: Handle a default/request pipeline and a final pipeline with minimal additional + overhead +area: Ingest Node +type: bug +issues: + - 92843 + - 81244 + - 
93118 +highlight: + title: Speed up ingest processing with multiple pipelines + body: |- + Processing documents with both a request/default and a final + pipeline is significantly faster. + + Rather than marshalling a document from and to json once per + pipeline, a document is now marshalled from json before any + pipelines execute and then back to json after all pipelines have + executed. + notable: true diff --git a/docs/changelog/93331.yaml b/docs/changelog/93331.yaml new file mode 100644 index 000000000000..5dd1cddc7207 --- /dev/null +++ b/docs/changelog/93331.yaml @@ -0,0 +1,5 @@ +pr: 93331 +summary: Add new `query_vector_builder` option to knn search clause +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/93333.yaml b/docs/changelog/93333.yaml new file mode 100644 index 000000000000..60048590d2d4 --- /dev/null +++ b/docs/changelog/93333.yaml @@ -0,0 +1,5 @@ +pr: 93333 +summary: "Script: Metadata `validateMetadata` optimization" +area: Infra/Scripting +type: enhancement +issues: [] diff --git a/docs/changelog/93340.yaml b/docs/changelog/93340.yaml new file mode 100644 index 000000000000..7d8c54d6155f --- /dev/null +++ b/docs/changelog/93340.yaml @@ -0,0 +1,6 @@ +pr: 93340 +summary: Nested path info shouldn't be added during `copy_to` +area: Search +type: bug +issues: + - 93117 diff --git a/docs/changelog/93370.yaml b/docs/changelog/93370.yaml new file mode 100644 index 000000000000..6cdc718ea4ad --- /dev/null +++ b/docs/changelog/93370.yaml @@ -0,0 +1,22 @@ +pr: 93370 +summary: Support geo_grid ingest processor +area: Geo +type: feature +issues: + - 92473 +highlight: + title: Support geo_grid ingest processor + body: |- + The `geo_grid` ingest processor supports creating indexable geometries from geohash, geotile and H3 cells. + + There already exists a `circle` ingest processor that creates a polygon from a point and radius definition. + This concept is useful when there is need to use spatial operations that work with indexable geometries on + geometric objects that are not defined spatially (or at least not indexable by lucene). + In this case, the string `4/8/5` does not have spatial meaning, until we interpret it as the address + of a rectangular `geotile`, and save the bounding box defining its border for further use. + Likewise we can interpret `geohash` strings like `u0` as a tile, and H3 strings like `811fbffffffffff` + as an hexagonal cell, saving the cell border as a polygon. 
+ + [role="screenshot"] + image::../images/spatial/geogrid_h3_children.png[Kibana map with three H3 layers: cell, children and intersecting non-children] + notable: true diff --git a/docs/changelog/93375.yaml b/docs/changelog/93375.yaml new file mode 100644 index 000000000000..63114927df96 --- /dev/null +++ b/docs/changelog/93375.yaml @@ -0,0 +1,5 @@ +pr: 93375 +summary: '`TransportGetTaskAction:` Wait for the task asynchronously' +area: Task Management +type: enhancement +issues: [] diff --git a/docs/changelog/93385.yaml b/docs/changelog/93385.yaml new file mode 100644 index 000000000000..036bff2571e1 --- /dev/null +++ b/docs/changelog/93385.yaml @@ -0,0 +1,5 @@ +pr: 93385 +summary: Upgrade to Lucene 9.5.0 +area: Search +type: upgrade +issues: [] diff --git a/docs/changelog/93388.yaml b/docs/changelog/93388.yaml new file mode 100644 index 000000000000..43ddc40bde73 --- /dev/null +++ b/docs/changelog/93388.yaml @@ -0,0 +1,5 @@ +pr: 93388 +summary: Allow `null` to be provided for `dense_vector` field values +area: Vector Search +type: enhancement +issues: [] diff --git a/docs/changelog/93392.yaml b/docs/changelog/93392.yaml new file mode 100644 index 000000000000..7ed7ffacc087 --- /dev/null +++ b/docs/changelog/93392.yaml @@ -0,0 +1,5 @@ +pr: 93392 +summary: Provide locally mounted secure settings implementation +area: Infra/Core +type: enhancement +issues: [] diff --git a/docs/changelog/93417.yaml b/docs/changelog/93417.yaml new file mode 100644 index 000000000000..422302464625 --- /dev/null +++ b/docs/changelog/93417.yaml @@ -0,0 +1,5 @@ +pr: 93417 +summary: Do not refresh all indices in `TransportBulkAction` +area: CRUD +type: bug +issues: [] diff --git a/docs/changelog/93421.yaml b/docs/changelog/93421.yaml new file mode 100644 index 000000000000..20cb13522f52 --- /dev/null +++ b/docs/changelog/93421.yaml @@ -0,0 +1,10 @@ +pr: 93421 +summary: Make `frequent_item_sets` aggregation GA +area: Machine Learning +type: feature +issues: [] +highlight: + title: Make `frequent_item_sets` aggregation GA + body: The `frequent_item_sets` aggregation has been moved from + technical preview to general availability. 
+ notable: true diff --git a/docs/changelog/93426.yaml b/docs/changelog/93426.yaml new file mode 100644 index 000000000000..c6bf12ccf9d3 --- /dev/null +++ b/docs/changelog/93426.yaml @@ -0,0 +1,5 @@ +pr: 93426 +summary: Add ability for Watcher's webhook actions to send additional header +area: Watcher +type: enhancement +issues: [] diff --git a/docs/changelog/93431.yaml b/docs/changelog/93431.yaml new file mode 100644 index 000000000000..082a84768445 --- /dev/null +++ b/docs/changelog/93431.yaml @@ -0,0 +1,6 @@ +pr: 93431 +summary: Fix context leak in list tasks API +area: Task Management +type: bug +issues: + - 93428 diff --git a/docs/changelog/93438.yaml b/docs/changelog/93438.yaml new file mode 100644 index 000000000000..24464b4245d9 --- /dev/null +++ b/docs/changelog/93438.yaml @@ -0,0 +1,5 @@ +pr: 93438 +summary: Align all usages of Jackson to be 2.14.2 +area: Infra/Core +type: upgrade +issues: [] diff --git a/docs/changelog/93445.yaml b/docs/changelog/93445.yaml new file mode 100644 index 000000000000..e2f1d39e9bb1 --- /dev/null +++ b/docs/changelog/93445.yaml @@ -0,0 +1,6 @@ +pr: 93445 +summary: Support downsampling of histogram as labels +area: Rollup +type: bug +issues: + - 93263 diff --git a/docs/changelog/93448.yaml b/docs/changelog/93448.yaml new file mode 100644 index 000000000000..54ccc0935836 --- /dev/null +++ b/docs/changelog/93448.yaml @@ -0,0 +1,5 @@ +pr: 93448 +summary: Speed up retrieval of data for flamegraphs +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/93461.yaml b/docs/changelog/93461.yaml new file mode 100644 index 000000000000..e1efa406c72f --- /dev/null +++ b/docs/changelog/93461.yaml @@ -0,0 +1,5 @@ +pr: 93461 +summary: Fallback to the actual shard size when forecast is not available +area: Allocation +type: bug +issues: [] diff --git a/docs/changelog/93551.yaml b/docs/changelog/93551.yaml new file mode 100644 index 000000000000..9c0a81b039e0 --- /dev/null +++ b/docs/changelog/93551.yaml @@ -0,0 +1,6 @@ +pr: 93551 +summary: Disable recovery monitor before recovery start +area: Recovery +type: bug +issues: + - 93542 diff --git a/docs/java-rest/low-level/configuration.asciidoc b/docs/java-rest/low-level/configuration.asciidoc index 18f96858c761..e1102305db1d 100644 --- a/docs/java-rest/low-level/configuration.asciidoc +++ b/docs/java-rest/low-level/configuration.asciidoc @@ -14,7 +14,7 @@ additional configuration for the low-level Java REST Client. Configuring requests timeouts can be done by providing an instance of `RequestConfigCallback` while building the `RestClient` through its builder. The interface has one method that receives an instance of -https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/client/config/RequestConfig.Builder.html[`org.apache.http.client.config.RequestConfig.Builder`] +https://hc.apache.org/httpcomponents-client-4.5.x/current/httpclient/apidocs/org/apache/http/client/config/RequestConfig.Builder.html[`org.apache.http.client.config.RequestConfig.Builder`] as an argument and has the same return type. The request config builder can be modified and then returned. In the following example we increase the connect timeout (defaults to 1 second) and the socket timeout (defaults to 30 @@ -50,7 +50,7 @@ include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-config-thre Configuring basic authentication can be done by providing an `HttpClientConfigCallback` while building the `RestClient` through its builder. 
The interface has one method that receives an instance of -https://hc.apache.org/httpcomponents-asyncclient-dev/httpasyncclient/apidocs/org/apache/http/impl/nio/client/HttpAsyncClientBuilder.html[`org.apache.http.impl.nio.client.HttpAsyncClientBuilder`] +https://hc.apache.org/httpcomponents-asyncclient-4.1.x/current/httpasyncclient/apidocs/org/apache/http/impl/nio/client/HttpAsyncClientBuilder.html[`org.apache.http.impl.nio.client.HttpAsyncClientBuilder`] as an argument and has the same return type. The http client builder can be modified and then returned. In the following example we set a default credentials provider that requires basic authentication. @@ -99,7 +99,7 @@ include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-auth-api-ke Encrypted communication using TLS can also be configured through the `HttpClientConfigCallback`. The -https://hc.apache.org/httpcomponents-asyncclient-dev/httpasyncclient/apidocs/org/apache/http/impl/nio/client/HttpAsyncClientBuilder.html[`org.apache.http.impl.nio.client.HttpAsyncClientBuilder`] +https://hc.apache.org/httpcomponents-asyncclient-4.1.x/current/httpasyncclient/apidocs/org/apache/http/impl/nio/client/HttpAsyncClientBuilder.html[`org.apache.http.impl.nio.client.HttpAsyncClientBuilder`] received as an argument exposes multiple methods to configure encrypted communication: `setSSLContext`, `setSSLSessionStrategy` and `setConnectionManager`, in order of precedence from the least important. diff --git a/docs/java-rest/low-level/usage.asciidoc b/docs/java-rest/low-level/usage.asciidoc index 68b91e06d6fa..439606281e04 100644 --- a/docs/java-rest/low-level/usage.asciidoc +++ b/docs/java-rest/low-level/usage.asciidoc @@ -57,7 +57,7 @@ dependencies { === Dependencies The low-level Java REST client internally uses the -https://hc.apache.org/httpcomponents-asyncclient-dev/[Apache Http Async Client] +https://hc.apache.org/httpcomponents-asyncclient-4.1.x/[Apache Http Async Client] to send http requests. It depends on the following artifacts, namely the async http client and its own transitive dependencies: @@ -152,7 +152,7 @@ A `RestClient` instance can be built through the corresponding `RestClientBuilder` class, created via `RestClient#builder(HttpHost...)` static method. The only required argument is one or more hosts that the client will communicate with, provided as instances of -https://hc.apache.org/httpcomponents-core-ga/httpcore/apidocs/org/apache/http/HttpHost.html[HttpHost] +https://hc.apache.org/httpcomponents-core-4.4.x/current/httpcore/apidocs/org/apache/http/HttpHost.html[HttpHost] as follows: ["source","java",subs="attributes,callouts,macros"] @@ -203,7 +203,7 @@ include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-init-reques -------------------------------------------------- <1> Set a callback that allows to modify the default request configuration (e.g. 
request timeouts, authentication, or anything that the -https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/client/config/RequestConfig.Builder.html[`org.apache.http.client.config.RequestConfig.Builder`] +https://hc.apache.org/httpcomponents-client-4.5.x/current/httpclient/apidocs/org/apache/http/client/config/RequestConfig.Builder.html[`org.apache.http.client.config.RequestConfig.Builder`] allows to set) ["source","java",subs="attributes,callouts,macros"] @@ -212,7 +212,7 @@ include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-init-client -------------------------------------------------- <1> Set a callback that allows to modify the http client configuration (e.g. encrypted communication over ssl, or anything that the -https://hc.apache.org/httpcomponents-asyncclient-dev/httpasyncclient/apidocs/org/apache/http/impl/nio/client/HttpAsyncClientBuilder.html[`org.apache.http.impl.nio.client.HttpAsyncClientBuilder`] +https://hc.apache.org/httpcomponents-asyncclient-4.1.x/current/httpasyncclient/apidocs/org/apache/http/impl/nio/client/HttpAsyncClientBuilder.html[`org.apache.http.impl.nio.client.HttpAsyncClientBuilder`] allows to set) @@ -339,9 +339,9 @@ translate to the execution of that request being cancelled, which needs to be specifically implemented in the API itself. The use of the `Cancellable` instance is optional and you can safely ignore this -if you don't need it. A typical usecase for this would be using this together with +if you don't need it. A typical usecase for this would be using this together with frameworks like Rx Java or the Kotlin's `suspendCancellableCoRoutine`. Cancelling -no longer needed requests is a good way to avoid putting unnecessary +no longer needed requests is a good way to avoid putting unnecessary load on Elasticsearch. ["source","java",subs="attributes,callouts,macros"] @@ -366,7 +366,7 @@ include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-response2] <2> The host that returned the response <3> The response status line, from which you can for instance retrieve the status code <4> The response headers, which can also be retrieved by name though `getHeader(String)` -<5> The response body enclosed in an https://hc.apache.org/httpcomponents-core-ga/httpcore/apidocs/org/apache/http/HttpEntity.html[`org.apache.http.HttpEntity`] +<5> The response body enclosed in an https://hc.apache.org/httpcomponents-core-4.4.x/current/httpcore/apidocs/org/apache/http/HttpEntity.html[`org.apache.http.HttpEntity`] object When performing a request, an exception is thrown (or received as an argument @@ -395,13 +395,13 @@ and un-marshalling. Users are free to use the library that they prefer for that purpose. The underlying Apache Async Http Client ships with different -https://hc.apache.org/httpcomponents-core-ga/httpcore/apidocs/org/apache/http/HttpEntity.html[`org.apache.http.HttpEntity`] +https://hc.apache.org/httpcomponents-core-4.4.x/current/httpcore/apidocs/org/apache/http/HttpEntity.html[`org.apache.http.HttpEntity`] implementations that allow to provide the request body in different formats (stream, byte array, string etc.). As for reading the response body, the `HttpEntity#getContent` method comes handy which returns an `InputStream` reading from the previously buffered response body. 
As an alternative, it is possible to provide a custom -https://hc.apache.org/httpcomponents-core-ga/httpcore-nio/apidocs/org/apache/http/nio/protocol/HttpAsyncResponseConsumer.html[`org.apache.http.nio.protocol.HttpAsyncResponseConsumer`] +https://hc.apache.org/httpcomponents-core-4.4.x/current/httpcore-nio/apidocs/org/apache/http/nio/protocol/HttpAsyncResponseConsumer.html[`org.apache.http.nio.protocol.HttpAsyncResponseConsumer`] that controls how bytes are read and buffered. [[java-rest-low-usage-logging]] diff --git a/docs/plugins/analysis-icu.asciidoc b/docs/plugins/analysis-icu.asciidoc index 3e6612dd8f4c..f4e8fcf65714 100644 --- a/docs/plugins/analysis-icu.asciidoc +++ b/docs/plugins/analysis-icu.asciidoc @@ -2,7 +2,7 @@ === ICU Analysis Plugin The ICU Analysis plugin integrates the Lucene ICU module into {es}, -adding extended Unicode support using the http://site.icu-project.org/[ICU] +adding extended Unicode support using the https://icu.unicode.org/[ICU] libraries, including better analysis of Asian languages, Unicode normalization, Unicode-aware case folding, collation support, and transliteration. @@ -48,7 +48,7 @@ The following parameters are accepted: ==== ICU Normalization Character Filter Normalizes characters as explained -http://userguide.icu-project.org/transforms/normalization[here]. +https://unicode-org.github.io/icu/userguide/transforms/normalization/[here]. It registers itself as the `icu_normalizer` character filter, which is available to all indices without any further configuration. The type of normalization can be specified with the `name` parameter, which accepts `nfc`, @@ -202,7 +202,7 @@ The above `analyze` request returns the following: ==== ICU Normalization Token Filter Normalizes characters as explained -http://userguide.icu-project.org/transforms/normalization[here]. It registers +https://unicode-org.github.io/icu/userguide/transforms/normalization/[here]. It registers itself as the `icu_normalizer` token filter, which is available to all indices without any further configuration. The type of normalization can be specified with the `name` parameter, which accepts `nfc`, `nfkc`, and `nfkc_cf` @@ -554,4 +554,4 @@ GET icu_sample/_analyze <3> Returns `zdravstvujte`. <4> Returns `kon'nichiha`. -For more documentation, Please see the http://userguide.icu-project.org/transforms/general[user guide of ICU Transform]. +For more documentation, Please see the https://unicode-org.github.io/icu/userguide/transforms/[user guide of ICU Transform]. diff --git a/docs/plugins/analysis-nori.asciidoc b/docs/plugins/analysis-nori.asciidoc index 6a2e7c767bab..1f37a31496c0 100644 --- a/docs/plugins/analysis-nori.asciidoc +++ b/docs/plugins/analysis-nori.asciidoc @@ -82,7 +82,7 @@ dictionary to `$ES_HOME/config/userdict_ko.txt`: [source,txt] ----------------------- c++ <1> -C샤프 +C쁠쁠 세종 세종시 세종 시 <2> ----------------------- @@ -176,7 +176,7 @@ PUT nori_sample "nori_user_dict": { "type": "nori_tokenizer", "decompound_mode": "mixed", - "user_dictionary_rules": ["c++", "C샤프", "세종", "세종시 세종 시"] + "user_dictionary_rules": ["c++", "C쁠쁠", "세종", "세종시 세종 시"] } }, "analyzer": { diff --git a/docs/plugins/plugin-script.asciidoc b/docs/plugins/plugin-script.asciidoc index f04c18115230..9d924a8d2606 100644 --- a/docs/plugins/plugin-script.asciidoc +++ b/docs/plugins/plugin-script.asciidoc @@ -188,6 +188,7 @@ purge the configuration files while removing a plugin, use `-p` or `--purge`. This can option can be used after a plugin is removed to remove any lingering configuration files. 
+[discrete] [[removing-multiple-plugins]] === Removing multiple plugins diff --git a/docs/reference/aggregations/bucket.asciidoc b/docs/reference/aggregations/bucket.asciidoc index a52eb15c9a15..4391d73ebd46 100644 --- a/docs/reference/aggregations/bucket.asciidoc +++ b/docs/reference/aggregations/bucket.asciidoc @@ -36,7 +36,7 @@ include::bucket/filter-aggregation.asciidoc[] include::bucket/filters-aggregation.asciidoc[] -include::bucket/frequent-items-aggregation.asciidoc[] +include::bucket/frequent-item-sets-aggregation.asciidoc[] include::bucket/geodistance-aggregation.asciidoc[] diff --git a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc index 405aa6863111..26774c7091d2 100644 --- a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc @@ -80,7 +80,8 @@ time zone. One month is the interval between the start day of the month and time of day and the same day of the month and time of the following month in the specified time zone, so that the day of the month and time of day are the same at the start -and end. +and end. Note that the day may differ if an +<>. `quarter`, `1q` :: @@ -543,6 +544,94 @@ NOTE: The start `offset` of each bucket is calculated after `time_zone` adjustments have been made. // end::offset-note[] +[[search-aggregations-bucket-datehistogram-offset-months]] +===== Long offsets over calendar intervals + +It is typical to use offsets in units smaller than the `calendar_interval`. For example, +using offsets in hours when the interval is days, or an offset of days when the interval is months. +If the calendar interval is always of a standard length, or the `offset` is less than one unit of the calendar +interval (for example less than `+24h` for `days` or less than `+28d` for months), +then each bucket will have a repeating start. For example `+6h` for `days` will result in all buckets +starting at 6am each day. However, `+30h` will also result in buckets starting at 6am, except when crossing +days that change from standard to summer-savings time or vice-versa. + +This situation is much more pronounced for months, where each month has a different length +to at least one of its adjacent months. +To demonstrate this, consider eight documents each with a date field on the 20th day of each of the +eight months from January to August of 2022. + +When querying for a date histogram over the calendar interval of months, the response will return one bucket per month, each with a single document. +Each bucket will have a key named after the first day of the month, plus any offset. +For example, the offset of `+19d` will result in buckets with names like `2022-01-20`. 
+ +[source,console,id=datehistogram-aggregation-offset-example-19d] +-------------------------------------------------- +"buckets": [ + { "key_as_string": "2022-01-20", "key": 1642636800000, "doc_count": 1 }, + { "key_as_string": "2022-02-20", "key": 1645315200000, "doc_count": 1 }, + { "key_as_string": "2022-03-20", "key": 1647734400000, "doc_count": 1 }, + { "key_as_string": "2022-04-20", "key": 1650412800000, "doc_count": 1 }, + { "key_as_string": "2022-05-20", "key": 1653004800000, "doc_count": 1 }, + { "key_as_string": "2022-06-20", "key": 1655683200000, "doc_count": 1 }, + { "key_as_string": "2022-07-20", "key": 1658275200000, "doc_count": 1 }, + { "key_as_string": "2022-08-20", "key": 1660953600000, "doc_count": 1 } +] +-------------------------------------------------- +// TESTRESPONSE[skip:no setup made for this example yet] + +If we increase the offset to `+20d`, each document will appear in a bucket for the previous month, +with all bucket keys ending with the same day of the month, as normal. +However, further increasing to `+28d`, +what used to be a February bucket has now become `"2022-03-01"`. + +[source,console,id=datehistogram-aggregation-offset-example-28d] +-------------------------------------------------- +"buckets": [ + { "key_as_string": "2021-12-29", "key": 1640736000000, "doc_count": 1 }, + { "key_as_string": "2022-01-29", "key": 1643414400000, "doc_count": 1 }, + { "key_as_string": "2022-03-01", "key": 1646092800000, "doc_count": 1 }, + { "key_as_string": "2022-03-29", "key": 1648512000000, "doc_count": 1 }, + { "key_as_string": "2022-04-29", "key": 1651190400000, "doc_count": 1 }, + { "key_as_string": "2022-05-29", "key": 1653782400000, "doc_count": 1 }, + { "key_as_string": "2022-06-29", "key": 1656460800000, "doc_count": 1 }, + { "key_as_string": "2022-07-29", "key": 1659052800000, "doc_count": 1 } +] +-------------------------------------------------- +// TESTRESPONSE[skip:no setup made for this example yet] + +If we continue to increase the offset, the 30-day months will also shift into the next month, +so that 3 of the 8 buckets have different days than the other five. +In fact, if we keep going, we will find cases where two documents appear in the same month. +Documents that were originally 30 days apart can be shifted into the same 31-day month bucket. + +For example, for `+50d` we see: + +[source,console,id=datehistogram-aggregation-offset-example-50d] +-------------------------------------------------- +"buckets": [ + { "key_as_string": "2022-01-20", "key": 1642636800000, "doc_count": 1 }, + { "key_as_string": "2022-02-20", "key": 1645315200000, "doc_count": 2 }, + { "key_as_string": "2022-04-20", "key": 1650412800000, "doc_count": 2 }, + { "key_as_string": "2022-06-20", "key": 1655683200000, "doc_count": 2 }, + { "key_as_string": "2022-08-20", "key": 1660953600000, "doc_count": 1 } +] +-------------------------------------------------- +// TESTRESPONSE[skip:no setup made for this example yet] + +It is therefore always important when using `offset` with `calendar_interval` bucket sizes +to understand the consequences of using offsets larger than the interval size. + +More examples: + +* If the goal is to, for example, have an annual histogram where each year starts on the 5th February, +you could use `calendar_interval` of `year` and `offset` of `+33d`, and each year will be shifted identically, +because the offset includes only January, which is the same length every year.
+However, if the goal is to have the year start on the 5th March instead, this technique will not work because +the offset includes February, which changes length every four years. +* If you want a quarterly histogram starting on a date within the first month of the year, it will work, +but as soon as you push the start date into the second month by having an offset longer than a month, the +quarters will all start on different dates. + [[date-histogram-keyed-response]] ==== Keyed Response @@ -705,7 +794,7 @@ POST /sales/_search?size=0 -------------------------------------------------- // TEST[setup:sales] -<1> Documents without a value in the `publish_date` field will fall into the +<1> Documents without a value in the `date` field will fall into the same bucket as documents that have the value `2000-01-01`. [[date-histogram-order]] diff --git a/docs/reference/aggregations/bucket/frequent-item-sets-aggregation.asciidoc b/docs/reference/aggregations/bucket/frequent-item-sets-aggregation.asciidoc new file mode 100644 index 000000000000..01dacd0d6ccd --- /dev/null +++ b/docs/reference/aggregations/bucket/frequent-item-sets-aggregation.asciidoc @@ -0,0 +1,405 @@ +[[search-aggregations-bucket-frequent-item-sets-aggregation]] +=== Frequent item sets aggregation +++++ +Frequent item sets +++++ + +A bucket aggregation which finds frequent item sets. It is a form of association +rules mining that identifies items that often occur together. Items that are +frequently purchased together or log events that tend to co-occur are examples +of frequent item sets. Finding frequent item sets helps to discover +relationships between different data points (items). + +The aggregation reports closed item sets. A frequent item set is called closed +if no superset exists with the same ratio of documents (also known as its +<>). For example, we have the two +following candidates for a frequent item set, which have the same support value: +1. `apple, orange, banana` +2. `apple, orange, banana, tomato`. +Only the second item set (`apple, orange, banana, tomato`) is returned, and the +first set – which is a subset of the second one – is skipped. Both item sets +might be returned if their support values are different. + +The runtime of the aggregation depends on the data and the provided parameters. +It might take a significant time for the aggregation to complete. For this +reason, it is recommended to use <> to run your +requests asynchronously. + + +==== Syntax + +A `frequent_item_sets` aggregation looks like this in isolation: + +[source,js] +-------------------------------------------------- +"frequent_item_sets": { + "minimum_set_size": 3, + "fields": [ + {"field": "my_field_1"}, + {"field": "my_field_2"} + ] +} +-------------------------------------------------- +// NOTCONSOLE + +.`frequent_item_sets` Parameters +|=== +|Parameter Name |Description |Required |Default Value +|`fields` |(array) Fields to analyze. | Required | +|`minimum_set_size` | (integer) The <> of one item set. | Optional | `1` +|`minimum_support` | (integer) The <> of one item set. | Optional | `0.1` +|`size` | (integer) The number of top item sets to return. | Optional | `10` +|`filter` | (object) Query that filters documents from the analysis | Optional | `match_all` +|=== + + +[discrete] +[[frequent-item-sets-fields]] +==== Fields + +Supported field types for the analyzed fields are keyword, numeric, ip, date, +and arrays of these types. You can also add runtime fields to your analyzed +fields. 
+ +If the combined cardinality of the analyzed fields is high, the aggregation +might require a significant amount of system resources. + +You can filter the values for each field by using the `include` and `exclude` +parameters. The parameters can be regular expression strings or arrays of +strings of exact terms. The filtered values are removed from the analysis and +therefore reduce the runtime. If both `include` and `exclude` are defined, +`exclude` takes precedence; it means `include` is evaluated first and then +`exclude`. + +[discrete] +[[frequent-item-sets-minimum-set-size]] +==== Minimum set size + +The minimum set size is the minimum number of items the set needs to contain. A +value of 1 returns the frequency of single items. Only item sets that contain at +least the number of `minimum_set_size` items are returned. For example, the item +set `orange, banana, apple` is returned only if the minimum set size is 3 or +lower. + +[discrete] +[[frequent-item-sets-minimum-support]] +==== Minimum support + +The minimum support value is the ratio of documents that an item set must exist +in to be considered "frequent". In particular, it is a normalized value between +0 and 1. It is calculated by dividing the number of documents containing the +item set by the total number of documents. + +For example, if a given item set is contained by five documents and the total +number of documents is 20, then the support of the item set is 5/20 = 0.25. +Therefore, this set is returned only if the minimum support is 0.25 or lower. +As a higher minimum support prunes more items, the calculation is less resource +intensive. The `minimum_support` parameter has an effect on the required memory +and the runtime of the aggregation. + + +[discrete] +[[frequent-item-sets-size]] +==== Size + +This parameter defines the maximum number of item sets to return. The result +contains top-k item sets; the item sets with the highest support values. This +parameter has a significant effect on the required memory and the runtime of the +aggregation. + + +[discrete] +[[frequent-item-sets-filter]] +==== Filter + +A query to filter documents to use as part of the analysis. Documents that +don't match the filter are ignored when generating the item sets; however, they still +count when calculating the support of an item set. + +Use the filter if you want to narrow the item set analysis to fields of interest. +Use a top-level query to filter the data set. + + +[discrete] +[[frequent-item-sets-example]] +==== Examples + +In the following examples, we use the e-commerce {kib} sample data set. + + +[discrete] +==== Aggregation with two analyzed fields and an `exclude` parameter + +In the first example, the goal is to find out, based on transaction data, (1.) +from what product categories the customers purchase products frequently together +and (2.) from which cities they make those purchases. We want to exclude results +where location information is not available (where the city name is `other`). +Finally, we are interested in sets with three or more items, and want to see the +first three frequent item sets with the highest support. + +Note that we use the <> endpoint in this first +example.
+ +[source,console] +------------------------------------------------- +POST /kibana_sample_data_ecommerce/_async_search +{ + "size":0, + "aggs":{ + "my_agg":{ + "frequent_item_sets":{ + "minimum_set_size":3, + "fields":[ + { + "field":"category.keyword" + }, + { + "field":"geoip.city_name", + "exclude":"other" + } + ], + "size":3 + } + } + } +} +------------------------------------------------- +// TEST[skip:setup kibana sample data] + +The response of the API call above contains an identifier (`id`) of the async +search request. You can use the identifier to retrieve the search results: + +[source,console] +------------------------------------------------- +GET /_async_search/ +------------------------------------------------- +// TEST[skip:setup kibana sample data] + +The API returns a response similar to the following one: + +[source,console-result] +------------------------------------------------- +(...) +"aggregations" : { + "my_agg" : { + "buckets" : [ <1> + { + "key" : { <2> + "category.keyword" : [ + "Women's Clothing", + "Women's Shoes" + ], + "geoip.city_name" : [ + "New York" + ] + }, + "doc_count" : 217, <3> + "support" : 0.04641711229946524 <4> + }, + { + "key" : { + "category.keyword" : [ + "Women's Clothing", + "Women's Accessories" + ], + "geoip.city_name" : [ + "New York" + ] + }, + "doc_count" : 135, + "support" : 0.028877005347593583 + }, + { + "key" : { + "category.keyword" : [ + "Men's Clothing", + "Men's Shoes" + ], + "geoip.city_name" : [ + "Cairo" + ] + }, + "doc_count" : 123, + "support" : 0.026310160427807486 + } + ], + (...) + } +} +------------------------------------------------- +// TEST[skip:setup kibana sample data] + +<1> The array of returned item sets. +<2> The `key` object contains one item set. In this case, it consists of two +values of the `category.keyword` field and one value of the `geoip.city_name`. +<3> The number of documents that contain the item set. +<4> The support value of the item set. It is calculated by dividing the number +of documents containing the item set by the total number of documents. + +The response shows that the categories customers purchase from most frequently +together are `Women's Clothing` and `Women's Shoes` and customers from New York +tend to buy items from these categories frequently together. In other words, +customers who buy products labelled `Women's Clothing` more likely buy products +also from the `Women's Shoes` category and customers from New York most likely +buy products from these categories together. The item set with the second +highest support is `Women's Clothing` and `Women's Accessories` with customers +mostly from New York. Finally, the item set with the third highest support is +`Men's Clothing` and `Men's Shoes` with customers mostly from Cairo. + + +[discrete] +==== Aggregation with two analyzed fields and a filter + +We take the first example, but want to narrow the item sets to places in Europe. 
+For that, we add a filter, and this time, we don't use the `exclude` parameter: + +[source,console] +------------------------------------------------- +POST /kibana_sample_data_ecommerce/_async_search +{ + "size": 0, + "aggs": { + "my_agg": { + "frequent_item_sets": { + "minimum_set_size": 3, + "fields": [ + { "field": "category.keyword" }, + { "field": "geoip.city_name" } + ], + "size": 3, + "filter": { + "term": { + "geoip.continent_name": "Europe" + } + } + } + } + } +} +------------------------------------------------- +// TEST[skip:setup kibana sample data] + +The result will only show item sets that are created from documents matching the +filter, namely purchases in Europe. Using `filter`, the calculated `support` +still takes all purchases into account. That's different from specifying a query +at the top-level, in which case `support` gets calculated only from purchases in +Europe. + + +[discrete] +==== Analyzing numeric values by using a runtime field + +The frequent item sets aggregation enables you to bucket numeric values by using +<>. The next example demonstrates how to use a script to +add a runtime field to your documents called `price_range`, which is +calculated from the taxful total price of the individual transactions. The +runtime field can then be used in the frequent item sets aggregation as a field to +analyze. + + +[source,console] +------------------------------------------------- +GET kibana_sample_data_ecommerce/_search +{ + "runtime_mappings": { + "price_range": { + "type": "keyword", + "script": { + "source": """ + def bucket_start = (long) Math.floor(doc['taxful_total_price'].value / 50) * 50; + def bucket_end = bucket_start + 50; + emit(bucket_start.toString() + "-" + bucket_end.toString()); + """ + } + } + }, + "size": 0, + "aggs": { + "my_agg": { + "frequent_item_sets": { + "minimum_set_size": 4, + "fields": [ + { + "field": "category.keyword" + }, + { + "field": "price_range" + }, + { + "field": "geoip.city_name" + } + ], + "size": 3 + } + } + } +} +------------------------------------------------- +// TEST[skip:setup kibana sample data] + +The API returns a response similar to the following one: + +[source,console-result] +------------------------------------------------- +(...) +"aggregations" : { + "my_agg" : { + "buckets" : [ + { + "key" : { + "category.keyword" : [ + "Women's Clothing", + "Women's Shoes" + ], + "price_range" : [ + "50-100" + ], + "geoip.city_name" : [ + "New York" + ] + }, + "doc_count" : 100, + "support" : 0.0213903743315508 + }, + { + "key" : { + "category.keyword" : [ + "Women's Clothing", + "Women's Shoes" + ], + "price_range" : [ + "50-100" + ], + "geoip.city_name" : [ + "Dubai" + ] + }, + "doc_count" : 59, + "support" : 0.012620320855614974 + }, + { + "key" : { + "category.keyword" : [ + "Men's Clothing", + "Men's Shoes" + ], + "price_range" : [ + "50-100" + ], + "geoip.city_name" : [ + "Marrakesh" + ] + }, + "doc_count" : 53, + "support" : 0.011336898395721925 + } + ], + (...) + } + } +------------------------------------------------- +// TEST[skip:setup kibana sample data] + +The response shows the categories that customers purchase from most frequently +together, the location of the customers who tend to buy items from these +categories, and the most frequent price ranges of these purchases.
diff --git a/docs/reference/aggregations/bucket/frequent-items-aggregation.asciidoc b/docs/reference/aggregations/bucket/frequent-items-aggregation.asciidoc deleted file mode 100644 index 811f08c98ca0..000000000000 --- a/docs/reference/aggregations/bucket/frequent-items-aggregation.asciidoc +++ /dev/null @@ -1,393 +0,0 @@ -[[search-aggregations-bucket-frequent-items-aggregation]] -=== Frequent items aggregation -++++ -Frequent items -++++ - -experimental::[] - -A bucket aggregation which finds frequent item sets. It is a form of association -rules mining that identifies items that often occur together. Items that are -frequently purchased together or log events that tend to co-occur are examples -of frequent item sets. Finding frequent item sets helps to discover -relationships between different data points (items). - -The aggregation reports closed item sets. A frequent item set is called closed -if no superset exists with the same ratio of documents (also known as its -<>). For example, we have the two -following candidates for a frequent item set, which have the same support value: -1. `apple, orange, banana` -2. `apple, orange, banana, tomato`. -Only the second item set (`apple, orange, banana, tomato`) is returned, and the -first set – which is a subset of the second one – is skipped. Both item sets -might be returned if their support values are different. - -The runtime of the aggregation depends on the data and the provided parameters. -It might take a significant time for the aggregation to complete. For this -reason, it is recommended to use <> to run your -requests asynchronously. - - -==== Syntax - -A `frequent_items` aggregation looks like this in isolation: - -[source,js] --------------------------------------------------- -"frequent_items": { - "minimum_set_size": 3, - "fields": [ - {"field": "my_field_1"}, - {"field": "my_field_2"} - ] -} --------------------------------------------------- -// NOTCONSOLE - -.`frequent_items` Parameters -|=== -|Parameter Name |Description |Required |Default Value -|`fields` |(array) Fields to analyze. | Required | -|`minimum_set_size` | (integer) The <> of one item set. | Optional | `1` -|`minimum_support` | (integer) The <> of one item set. | Optional | `0.1` -|`size` | (integer) The number of top item sets to return. | Optional | `10` -|`filter` | (object) Query that filters documents from the analysis | Optional | `match_all` -|=== - - -[discrete] -[[frequent-items-fields]] -==== Fields - -Supported field types for the analyzed fields are keyword, numeric, ip, date, -and arrays of these types. You can also add runtime fields to your analyzed -fields. - -If the combined cardinality of the analyzed fields are high, then the -aggregation might require a significant amount of system resources. - -[discrete] -[[frequent-items-minimum-set-size]] -==== Minimum set size - -The minimum set size is the minimum number of items the set needs to contain. A -value of 1 returns the frequency of single items. Only item sets that contain at -least the number of `minimum_set_size` items are returned. For example, the item -set `orange, banana, apple` is returned only if the minimum set size is 3 or -lower. - -[discrete] -[[frequent-items-minimum-support]] -==== Minimum support - -The minimum support value is the ratio of documents that an item set must exist -in to be considered "frequent". In particular, it is a normalized value between -0 and 1. It is calculated by dividing the number of documents containing the -item set by the total number of documents. 
- -For example, if a given item set is contained by five documents and the total -number of documents is 20, then the support of the item set is 5/20 = 0.25. -Therefore, this set is returned only if the minimum support is 0.25 or lower. -As a higher minimum support prunes more items, the calculation is less resource -intensive. The `minimum_support` parameter has an effect on the required memory -and the runtime of the aggregation. - - -[discrete] -[[frequent-items-size]] -==== Size - -This parameter defines the maximum number of item sets to return. The result -contains top-k item sets; the item sets with the highest support values. This -parameter has a significant effect on the required memory and the runtime of the -aggregation. - - -[discrete] -[[frequent-items-filter]] -==== Filter - -A query to filter documents to use as part of the analysis. Documents that -don't match the filter are ignored when generating the item sets, however still -count when calculating the support of an item set. - -Use the filter if you want to narrow the item set analysis to fields of interest. -Use a top-level query to filter the data set. - - -[discrete] -[[frequent-items-example]] -==== Examples - -In the following examples, we use the e-commerce {kib} sample data set. - - -[discrete] -==== Aggregation with two analyzed fields - -In the first example, the goal is to find out based on transaction data (1.) -from what product categories the customers purchase products frequently together -and (2.) from which cities they make those purchases. We are interested in sets -with three or more items, and want to see the first three frequent item sets -with the highest support. - -Note that we use the <> endpoint in this first -example. - -[source,console] -------------------------------------------------- -POST /kibana_sample_data_ecommerce/_async_search -{ - "size": 0, - "aggs": { - "my_agg": { - "frequent_items": { - "minimum_set_size": 3, - "fields": [ - { "field": "category.keyword" }, - { "field": "geoip.city_name" } - ], - "size": 3 - } - } - } -} -------------------------------------------------- -// TEST[skip:setup kibana sample data] - -The response of the API call above contains an identifier (`id`) of the async -search request. You can use the identifier to retrieve the search results: - -[source,console] -------------------------------------------------- -GET /_async_search/ -------------------------------------------------- -// TEST[skip:setup kibana sample data] - -The API returns a response similar to the following one: - -[source,console-result] -------------------------------------------------- -(...) -"aggregations" : { - "my_agg" : { - "buckets" : [ <1> - { - "key" : { <2> - "category.keyword" : [ - "Women's Clothing", - "Women's Shoes" - ], - "geoip.city_name" : [ - "New York" - ] - }, - "doc_count" : 217, <3> - "support" : 0.04641711229946524 <4> - }, - { - "key" : { - "category.keyword" : [ - "Women's Clothing", - "Women's Accessories" - ], - "geoip.city_name" : [ - "New York" - ] - }, - "doc_count" : 135, - "support" : 0.028877005347593583 - }, - { - "key" : { - "category.keyword" : [ - "Men's Clothing", - "Men's Shoes" - ], - "geoip.city_name" : [ - "Cairo" - ] - }, - "doc_count" : 123, - "support" : 0.026310160427807486 - } - ], - (...) - } -} -------------------------------------------------- -// TEST[skip:setup kibana sample data] - -<1> The array of returned item sets. -<2> The `key` object contains one item set. 
In this case, it consists of two -values of the `category.keyword` field and one value of the `geoip.city_name`. -<3> The number of documents that contain the item set. -<4> The support value of the item set. It is calculated by dividing the number -of documents containing the item set by the total number of documents. - -The response shows that the categories customers purchase from most frequently -together are `Women's Clothing` and `Women's Shoes` and customers from New York -tend to buy items from these categories frequently togeher. In other words, -customers who buy products labelled `Women's Clothing` more likely buy products -also from the `Women's Shoes` category and customers from New York most likely buy -products from these categories together. The item set with the second highest -support is `Women's Clothing` and `Women's Accessories` with customers mostly -from New York. Finally, the item set with the third highest support is -`Men's Clothing` and `Men's Shoes` with customers mostly from Cairo. - - -[discrete] -==== Aggregation with two analyzed fields and a filter - -We take the first example, but want to narrow the item sets to places in Europe. -For that we add a filter: - -[source,console] -------------------------------------------------- -POST /kibana_sample_data_ecommerce/_async_search -{ - "size": 0, - "aggs": { - "my_agg": { - "frequent_items": { - "minimum_set_size": 3, - "fields": [ - { "field": "category.keyword" }, - { "field": "geoip.city_name" } - ], - "size": 3, - "filter": { - "term": { - "geoip.continent_name": "Europe" - } - } - } - } - } -} -------------------------------------------------- -// TEST[skip:setup kibana sample data] - -The result will only show item sets that created from documents matching the -filter, namely purchases in Europe. Using `filter`, the calculated `support` still -takes all purchases into acount. That's different than specifying a query at the -top-level, in which case `support` gets calculated only from purchases in Europe. - - -[discrete] -==== Analyzing numeric values by using a runtime field - -The frequent items aggregation enables you to bucket numeric values by using -<>. The next example demonstrates how to use a script to -add a runtime field to your documents called `price_range`, which is -calculated from the taxful total price of the individual transactions. The -runtime field then can be used in the frequent items aggregation as a field to -analyze. - - -[source,console] -------------------------------------------------- -GET kibana_sample_data_ecommerce/_search -{ - "runtime_mappings": { - "price_range": { - "type": "keyword", - "script": { - "source": """ - def bucket_start = (long) Math.floor(doc['taxful_total_price'].value / 50) * 50; - def bucket_end = bucket_start + 50; - emit(bucket_start.toString() + "-" + bucket_end.toString()); - """ - } - } - }, - "size": 0, - "aggs": { - "my_agg": { - "frequent_items": { - "minimum_set_size": 4, - "fields": [ - { - "field": "category.keyword" - }, - { - "field": "price_range" - }, - { - "field": "geoip.city_name" - } - ], - "size": 3 - } - } - } -} -------------------------------------------------- -// TEST[skip:setup kibana sample data] - -The API returns a response similar to the following one: - -[source,console-result] -------------------------------------------------- -(...) 
-"aggregations" : { - "my_agg" : { - "buckets" : [ - { - "key" : { - "category.keyword" : [ - "Women's Clothing", - "Women's Shoes" - ], - "price_range" : [ - "50-100" - ], - "geoip.city_name" : [ - "New York" - ] - }, - "doc_count" : 100, - "support" : 0.0213903743315508 - }, - { - "key" : { - "category.keyword" : [ - "Women's Clothing", - "Women's Shoes" - ], - "price_range" : [ - "50-100" - ], - "geoip.city_name" : [ - "Dubai" - ] - }, - "doc_count" : 59, - "support" : 0.012620320855614974 - }, - { - "key" : { - "category.keyword" : [ - "Men's Clothing", - "Men's Shoes" - ], - "price_range" : [ - "50-100" - ], - "geoip.city_name" : [ - "Marrakesh" - ] - }, - "doc_count" : 53, - "support" : 0.011336898395721925 - } - ], - (...) - } - } -------------------------------------------------- -// TEST[skip:setup kibana sample data] - -The response shows the categories that customers purchase from most frequently -together, the location of the customers who tend to buy items from these -categories, and the most frequent price ranges of these purchases. \ No newline at end of file diff --git a/docs/reference/aggregations/bucket/geohexgrid-aggregation.asciidoc b/docs/reference/aggregations/bucket/geohexgrid-aggregation.asciidoc index 1b47a2cf2cd4..ce0fea4f4ea9 100644 --- a/docs/reference/aggregations/bucket/geohexgrid-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/geohexgrid-aggregation.asciidoc @@ -5,8 +5,8 @@ Geohex grid ++++ -A multi-bucket aggregation that groups <> -values into buckets that represent a grid. +A multi-bucket aggregation that groups <> and +<> values into buckets that represent a grid. The resulting grid can be sparse and only contains cells that have matching data. Each cell corresponds to a https://h3geo.org/docs/core-library/h3Indexing#h3-cell-indexp[H3 cell index] and is @@ -18,7 +18,7 @@ Precision for this aggregation can be between 0 and 15, inclusive. WARNING: High-precision requests can be very expensive in terms of RAM and result sizes. For example, the highest-precision geohex with a precision of 15 -produces cells that cover less than 10cm by 10cm. We recommend you use a +produces cells that cover less than one square meter. We recommend you use a filter to limit high-precision requests to a smaller geographic area. For an example, refer to <>. @@ -220,21 +220,45 @@ Response: -------------------------------------------------- // TESTRESPONSE[s/\.\.\./"took": $body.took,"_shards": $body._shards,"hits":$body.hits,"timed_out":false,/] -[[geohexgrid-options]] +[discrete] +[role="xpack"] +[[geohexgrid-aggregating-geo-shape]] +==== Aggregating `geo_shape` fields + +Aggregating on <> fields works almost as it does for points. There are two key differences: + +* When aggregating over `geo_point` data, points are considered within a hexagonal tile if they lie +within the edges defined by great circles. In other words the calculation is done using spherical coordinates. +However, when aggregating over `geo_shape` data, the shapes are considered within a hexagon if they lie +within the edges defined as straight lines on an equirectangular projection. +The reason is that Elasticsearch and Lucene treat edges using the equirectangular projection at index and search time. +In order to ensure that search results and aggregation results are aligned, we therefore also use equirectangular +projection in aggregations. +For most data, the difference is subtle or not noticed. 
+However, for low zoom levels (low precision), especially far from the equator, this can be noticeable. +For example, if the same point data is indexed as `geo_point` and `geo_shape`, it is possible to get +different results when aggregating at lower resolutions. +* As is the case with <>, +a single shape can be counted for in multiple tiles. A shape will contribute to the count of matching values +if any part of its shape intersects with that tile. Below is an image that demonstrates this: + + +image:images/spatial/geoshape_hexgrid.png[] + ==== Options [horizontal] field:: -(Required, string) Field containing indexed geo-point values. Must be explicitly -mapped as a <> field. If the field contains an array, -`geohex_grid` aggregates all array values. +(Required, string) Field containing indexed geo-point or geo-shape values. +Must be explicitly mapped as a <> or a <> field. +If the field contains an array, `geohex_grid` aggregates all array values. precision:: (Optional, integer) Integer zoom of the key used to define cells/buckets in the results. Defaults to `6`. Values outside of [`0`,`15`] will be rejected. bounds:: -(Optional, object) Bounding box used to filter the geo-points in each bucket. +(Optional, object) Bounding box used to filter the geo-points or geo-shapes in each bucket. Accepts the same bounding box formats as the <>. @@ -245,5 +269,6 @@ documents they contain. shard_size:: (Optional, integer) Number of buckets returned from each shard. Defaults to -`max(10,(size x number-of-shards))` to allow for more a accurate count of the -top cells in the final result. +`max(10,(size x number-of-shards))` to allow for a more accurate count of the +top cells in the final result. Since each shard could have a different top result order, +using a larger number here reduces the risk of inaccurate counts, but incurs a performance cost. diff --git a/docs/reference/aggregations/bucket/geotilegrid-aggregation.asciidoc b/docs/reference/aggregations/bucket/geotilegrid-aggregation.asciidoc index db5dc58f23cc..413cc622d5d0 100644 --- a/docs/reference/aggregations/bucket/geotilegrid-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/geotilegrid-aggregation.asciidoc @@ -17,7 +17,7 @@ cover only a small area. * Low precision keys have a smaller range for x and y, and represent tiles that each cover a large area. -See https://wiki.openstreetmap.org/wiki/Zoom_levels[Zoom level documentation] +See https://wiki.openstreetmap.org/wiki/Zoom_levels[zoom level documentation] on how precision (zoom) correlates to size on the ground. Precision for this aggregation can be between 0 and 29, inclusive. @@ -102,14 +102,15 @@ Response: -------------------------------------------------- // TESTRESPONSE[s/\.\.\./"took": $body.took,"_shards": $body._shards,"hits":$body.hits,"timed_out":false,/] +[[geotilegrid-high-precision]] ==== High-precision requests -When requesting detailed buckets (typically for displaying a "zoomed in" map) +When requesting detailed buckets (typically for displaying a "zoomed in" map), a filter like <> should be -applied to narrow the subject area otherwise potentially millions of buckets +applied to narrow the subject area. Otherwise, potentially millions of buckets will be created and returned. 
-[source,console] +[source,console,id=geotilegrid-high-precision-ex] -------------------------------------------------- POST /museums/_search?size=0 { @@ -137,6 +138,8 @@ POST /museums/_search?size=0 -------------------------------------------------- // TEST[continued] +Response: + [source,console-result] -------------------------------------------------- { @@ -166,13 +169,14 @@ POST /museums/_search?size=0 -------------------------------------------------- // TESTRESPONSE[s/\.\.\./"took": $body.took,"_shards": $body._shards,"hits":$body.hits,"timed_out":false,/] +[[geotilegrid-addtl-bounding-box-filtering]] ==== Requests with additional bounding box filtering The `geotile_grid` aggregation supports an optional `bounds` parameter -that restricts the cells considered to those that intersects the -bounds provided. The `bounds` parameter accepts the bounding box in -all the same <> of the -bounds specified in the Geo Bounding Box Query. This bounding box can be used with or +that restricts the cells considered to those that intersect the +provided bounds. The `bounds` parameter accepts the same +<> +as the geo-bounding box query. This bounding box can be used with or without an additional `geo_bounding_box` query for filtering the points prior to aggregating. It is an independent bounding box that can intersect with, be equal to, or be disjoint to any additional `geo_bounding_box` queries defined in the context of the aggregation. @@ -197,6 +201,8 @@ POST /museums/_search?size=0 -------------------------------------------------- // TEST[continued] +Response: + [source,console-result] -------------------------------------------------- { @@ -225,9 +231,10 @@ POST /museums/_search?size=0 [discrete] [role="xpack"] +[[geotilegrid-aggregating-geo-shape]] ==== Aggregating `geo_shape` fields -Aggregating on <> fields works just as it does for points, except that a single +Aggregating on <> fields works almost as it does for points, except that a single shape can be counted for in multiple tiles. A shape will contribute to the count of matching values if any part of its shape intersects with that tile. Below is an image that demonstrates this: @@ -237,20 +244,27 @@ image:images/spatial/geoshape_grid.png[] ==== Options [horizontal] -field:: Mandatory. The name of the field indexed with GeoPoints. +field:: +(Required, string) Field containing indexed geo-point or geo-shape values. +Must be explicitly mapped as a <> or a <> field. +If the field contains an array, `geotile_grid` aggregates all array values. -precision:: Optional. The integer zoom of the key used to define - cells/buckets in the results. Defaults to 7. - Values outside of [0,29] will be rejected. +precision:: +(Optional, integer) Integer zoom of the key used to define cells/buckets in +the results. Defaults to `7`. Values outside of [`0`,`29`] will be rejected. -bounds: Optional. The bounding box to filter the points in the bucket. +bounds:: +(Optional, object) Bounding box used to filter the geo-points or geo-shapes in each bucket. +Accepts the same bounding box formats as the +<>. -size:: Optional. The maximum number of geohash buckets to return - (defaults to 10,000). When results are trimmed, buckets are - prioritised based on the volumes of documents they contain. +size:: +(Optional, integer) Maximum number of buckets to return. Defaults to 10,000. +When results are trimmed, buckets are prioritized based on the volume of +documents they contain. -shard_size:: Optional. 
To allow for more accurate counting of the top cells - returned in the final result the aggregation defaults to - returning `max(10,(size x number-of-shards))` buckets from each - shard. If this heuristic is undesirable, the number considered - from each shard can be over-ridden using this parameter. +shard_size:: +(Optional, integer) Number of buckets returned from each shard. Defaults to +`max(10,(size x number-of-shards))` to allow for a more accurate count of the +top cells in the final result. Since each shard could have a different top result order, +using a larger number here reduces the risk of inaccurate counts, but incurs a performance cost. diff --git a/docs/reference/analysis/token-graphs.asciidoc b/docs/reference/analysis/token-graphs.asciidoc index 9881afe908eb..55d69695bd62 100644 --- a/docs/reference/analysis/token-graphs.asciidoc +++ b/docs/reference/analysis/token-graphs.asciidoc @@ -34,7 +34,7 @@ include tokens for multi-word synonyms, such as using "atm" as a synonym for "automatic teller machine." However, only some token filters, known as _graph token filters_, accurately -record the `positionLength` for multi-position tokens. This filters include: +record the `positionLength` for multi-position tokens. These filters include: * <> * <> @@ -105,4 +105,4 @@ in an invalid graph. image::images/analysis/token-graph-dns-invalid-ex.svg[align="center"] Avoid using invalid token graphs for search. Invalid graphs can cause unexpected -search results. \ No newline at end of file +search results. diff --git a/docs/reference/autoscaling/deciders/machine-learning-decider.asciidoc b/docs/reference/autoscaling/deciders/machine-learning-decider.asciidoc index 1a2b230cf520..26ced6ad7bb2 100644 --- a/docs/reference/autoscaling/deciders/machine-learning-decider.asciidoc +++ b/docs/reference/autoscaling/deciders/machine-learning-decider.asciidoc @@ -2,25 +2,26 @@ [[autoscaling-machine-learning-decider]] === Machine learning decider -The {ml} decider (`ml`) calculates the memory required to run {ml} jobs. +The {ml} decider (`ml`) calculates the memory and CPU requirements to run {ml} +jobs and trained models. The {ml} decider is enabled for policies governing `ml` nodes. -NOTE: For {ml} jobs to open when the cluster is not appropriately -scaled, set `xpack.ml.max_lazy_ml_nodes` to the largest number of possible {ml} -jobs (refer to <> for more information). In {ess}, this is +NOTE: For {ml} jobs to open when the cluster is not appropriately scaled, set +`xpack.ml.max_lazy_ml_nodes` to the largest number of possible {ml} nodes (refer +to <> for more information). In {ess}, this is automatically set. [[autoscaling-machine-learning-decider-settings]] ==== Configuration settings Both `num_anomaly_jobs_in_queue` and `num_analytics_jobs_in_queue` are designed -to delay a scale-up event. If the cluster is too small, these settings indicate how many jobs of each type can be -unassigned from a node. Both settings are -only considered for jobs that can be opened given the current scale. If a job is -too large for any node size or if a job can't be assigned without user -intervention (for example, a user calling `_stop` against a real-time -{anomaly-job}), the numbers are ignored for that particular job. +to delay a scale-up event. If the cluster is too small, these settings indicate +how many jobs of each type can be unassigned from a node. Both settings are only +considered for jobs that can be opened given the current scale. 
If a job is too +large for any node size or if a job can't be assigned without user intervention +(for example, a user calling `_stop` against a real-time {anomaly-job}), the +numbers are ignored for that particular job. `num_anomaly_jobs_in_queue`:: (Optional, integer) diff --git a/docs/reference/cluster/get-desired-balance.asciidoc b/docs/reference/cluster/get-desired-balance.asciidoc index caa77561a08c..a9db8838ee27 100644 --- a/docs/reference/cluster/get-desired-balance.asciidoc +++ b/docs/reference/cluster/get-desired-balance.asciidoc @@ -32,6 +32,90 @@ The API returns the following result: "computation_time_in_millis": 0, "reconciliation_time_in_millis": 0 }, + "cluster_balance_stats" : { + "tiers": { + "data_hot" : { + "shard_count" : { + "total" : 7.0, + "min" : 2.0, + "max" : 3.0, + "average" : 2.3333333333333335, + "std_dev" : 0.4714045207910317 + }, + "forecast_write_load" : { + "total" : 21.0, + "min" : 6.0, + "max" : 8.5, + "average" : 7.0, + "std_dev" : 1.0801234497346435 + }, + "forecast_disk_usage" : { + "total" : 36.0, + "min" : 10.0, + "max" : 16.0, + "average" : 12.0, + "std_dev" : 2.8284271247461903 + }, + "actual_disk_usage" : { + "total" : 36.0, + "min" : 10.0, + "max" : 16.0, + "average" : 12.0, + "std_dev" : 2.8284271247461903 + } + }, + "data_warm" : { + "shard_count" : { + "total" : 3.0, + "min" : 1.0, + "max" : 1.0, + "average" : 1.0, + "std_dev" : 0.0 + }, + "forecast_write_load" : { + "total" : 0.0, + "min" : 0.0, + "max" : 0.0, + "average" : 0.0, + "std_dev" : 0.0 + }, + "forecast_disk_usage" : { + "total" : 42.0, + "min" : 12.0, + "max" : 18.0, + "average" : 14.0, + "std_dev" : 2.8284271247461903 + }, + "actual_disk_usage" : { + "total" : 42.0, + "min" : 12.0, + "max" : 18.0, + "average" : 14.0, + "std_dev" : 2.8284271247461903 + } + } + }, + "nodes": { + "node-1": { + "shard_count": 10, + "forecast_write_load": 8.5, + "forecast_disk_usage_bytes": 498435, + "actual_disk_usage_bytes": 498435 + }, + "node-2": { + "shard_count": 15, + "forecast_write_load": 3.25, + "forecast_disk_usage_bytes": 384935, + "actual_disk_usage_bytes": 384935 + }, + "node-3": { + "shard_count": 12, + "forecast_write_load": 6.0, + "forecast_disk_usage_bytes": 648766, + "actual_disk_usage_bytes": 648766 + } + } + }, "routing_table": { "test": { "0": { @@ -44,7 +128,9 @@ The API returns the following result: "relocating_node": null, "relocating_node_is_desired": false, "shard_id": 0, - "index": "test" + "index": "test", + "forecast_write_load": 8.0, + "forecast_shard_size_in_bytes": 1024 } ], "desired": { @@ -66,7 +152,9 @@ The API returns the following result: "relocating_node": null, "relocating_node_is_desired": false, "shard_id": 1, - "index": "test" + "index": "test", + "forecast_write_load": null, + "forecast_shard_size_in_bytes": null } ], "desired": { diff --git a/docs/reference/cluster/get-settings.asciidoc b/docs/reference/cluster/get-settings.asciidoc index 16b88f605010..931ebc9759a8 100644 --- a/docs/reference/cluster/get-settings.asciidoc +++ b/docs/reference/cluster/get-settings.asciidoc @@ -37,7 +37,7 @@ defined, but can also include the default settings by calling the include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=flat-settings] `include_defaults`:: - (Optional, Boolean) If `true`, returns all default cluster settings. + (Optional, Boolean) If `true`, returns default cluster settings from the local node. Defaults to `false`. 
-include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] \ No newline at end of file +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index da0402cac171..98e22eaafd84 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -2679,8 +2679,8 @@ requests. [[cluster-nodes-stats-api-example]] ==== {api-examples-title} -[source,console] --------------------------------------------------- +[source,console,id=nodes-stats-limit] +---- # return just indices GET /_nodes/stats/indices @@ -2689,8 +2689,7 @@ GET /_nodes/stats/os,process # return just process for node with IP address 10.0.0.1 GET /_nodes/10.0.0.1/stats/process --------------------------------------------------- -// TESTRESPONSE[skip:"AwaitsFix https://github.com/elastic/elasticsearch/issues/91081"] +---- All stats can be explicitly requested via `/_nodes/stats/_all` or `/_nodes/stats?metric=_all`. @@ -2698,8 +2697,8 @@ All stats can be explicitly requested via `/_nodes/stats/_all` or You can get information about indices stats on `node`, `indices`, or `shards` level. -[source,console] --------------------------------------------------- +[source,console,id=nodes-stats-indices] +---- # Fielddata summarized by node GET /_nodes/stats/indices/fielddata?fields=field1,field2 @@ -2711,21 +2710,19 @@ GET /_nodes/stats/indices/fielddata?level=shards&fields=field1,field2 # You can use wildcards for field names GET /_nodes/stats/indices/fielddata?fields=field* --------------------------------------------------- -// TESTRESPONSE[skip:"AwaitsFix https://github.com/elastic/elasticsearch/issues/91081"] +---- You can get statistics about search groups for searches executed on this node. -[source,console] --------------------------------------------------- +[source,console,id=nodes-stats-groups] +---- # All groups with all stats GET /_nodes/stats?groups=_all # Some groups from just the indices stats GET /_nodes/stats/indices?groups=foo,bar --------------------------------------------------- -// TESTRESPONSE[skip:"AwaitsFix https://github.com/elastic/elasticsearch/issues/91081"] +---- [[cluster-nodes-stats-ingest-ex]] ===== Retrieve ingest statistics only @@ -2734,25 +2731,23 @@ To return only ingest-related node statistics, set the `` path parameter to `ingest` and use the <> query parameter. -[source,console] --------------------------------------------------- +[source,console,id=nodes-stats-filter-path] +---- GET /_nodes/stats/ingest?filter_path=nodes.*.ingest --------------------------------------------------- -// TESTRESPONSE[skip:"AwaitsFix https://github.com/elastic/elasticsearch/issues/91081"] +---- You can use the `metric` and `filter_path` query parameters to get the same response. -[source,console] --------------------------------------------------- +[source,console,id=nodes-stats-metric-filter-path] +---- GET /_nodes/stats?metric=ingest&filter_path=nodes.*.ingest --------------------------------------------------- +---- To further refine the response, change the `filter_path` value. For example, the following request only returns ingest pipeline statistics. 
-[source,console] --------------------------------------------------- +[source,console,id=nodes-stats-metric-filter-path-refined] +---- GET /_nodes/stats?metric=ingest&filter_path=nodes.*.ingest.pipelines --------------------------------------------------- -// TESTRESPONSE[skip:"AwaitsFix https://github.com/elastic/elasticsearch/issues/91081"] +---- diff --git a/docs/reference/cluster/prevalidate-node-removal.asciidoc b/docs/reference/cluster/prevalidate-node-removal.asciidoc index b6312e37f025..d7f0ed64d6c0 100644 --- a/docs/reference/cluster/prevalidate-node-removal.asciidoc +++ b/docs/reference/cluster/prevalidate-node-removal.asciidoc @@ -21,7 +21,9 @@ Prevalidate node removal. [[prevalidate-node-removal-api-desc]] ==== {api-description-title} -This API checks whether attempting to remove the specified node(s) from the cluster is likely to succeed or not. For a cluster with no unassigned shards, removal of any node is considered safe which means the removal of the nodes is likely to succeed. In case the cluster has a <>, it verifies that the removal of the node(s) would not risk removing the last remaining copy of an unassigned shard. +This API checks whether attempting to remove the specified node(s) from the cluster is likely to succeed or not. For a cluster with no unassigned shards, removal of any node is considered safe which means the removal of the nodes is likely to succeed. + +In case the cluster has a <>, it verifies that the removal of the node(s) would not risk removing the last remaining copy of an unassigned shard. If there are red indices in the cluster, the API checks whether the red indices are <> indices, and if not, it sends a request to each of nodes specified in the API call to verify whether the nodes might contain local shard copies of the red indices that are not Searchable Snapshot indices. This request is processed on each receiving node, by checking whether the node has a shard directory for any of the red index shards. The response includes the overall safety of the removal of the specified nodes, and a detailed response for each node. The node-specific part of the response also includes more details on why removal of that node might not succeed. @@ -32,7 +34,7 @@ Note that if the prevalidation result for a set of nodes returns `true` (i.e. it [[prevalidate-node-removal-api-query-params]] ==== {api-query-parms-title} -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=master-timeout] +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] `names`:: (Optional, string) Comma-separated list of node names. @@ -54,11 +56,50 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=master-timeout] `nodes`:: (object) Prevalidation result for the removal of each of the provided nodes. ++ +.Properties of `nodes` +[%collapsible%open] +==== +``:: + (object) Contains information about the removal prevalidation of a specific node. ++ +.Properties of `` +[%collapsible%open] +======= +`id`:: + (string) node ID +`name`:: + (string) node name +`external_id`:: + (string) node external ID +`result`:: + (object) Contains removal prevalidation result of the node. ++ +.Properties of `result` +[%collapsible%open] +======== +`is_safe`:: + (boolean) Whether the removal of the node is considered safe or not. +`reason`:: + (string) A string that specifies the reason why the prevalidation result is considered safe or not. 
It can be one of the following values: ++ +-- + * `no_problems`: The prevalidation did not find any issues that could prevent the node from being safely removed. + * `no_red_shards_except_searchable_snapshots`: The node can be safely removed as all red indices are searchable snapshot indices and therefore removing a node does not risk removing the last copy of that index from the cluster. + * `no_red_shards_on_node`: The node does not contain any copies of the red non-searchable-snapshot index shards. + * `red_shards_on_node`: The node might contain shard copies of some non-searchable-snapshot red indices. The list of the shards that might be on the node is specified in the `message` field. + * `unable_to_verify_red_shards`: Contacting the node failed or timed out. More details are provided in the `message` field. +-- +`message`:: + (Optional, string) Detailed information about the removal prevalidation result. +======== +======= +==== [[prevalidate-node-removal-api-example]] ==== {api-examples-title} -This example validates whether it is safe to remove the nodes `node1` and `node2`. The response indicates that it is safe to remove `node1`, but it might not be safe to remove `node2`. Therefore, the overall prevalidation of the removal of the two nodes returns `false`. +This example validates whether it is safe to remove the nodes `node1` and `node2`. The response indicates that it is safe to remove `node1`, but it might not be safe to remove `node2` as it might contain copies of the specified red shards. Therefore, the overall prevalidation of the removal of the two nodes returns `false`. [source,console] -------------------------------------------------- @@ -72,7 +113,7 @@ The API returns the following response: -------------------------------------------------- { "is_safe": false, - "message": "cluster health is RED", + "message": "removal of the following nodes might not be safe: [node2-id]", "nodes": [ { "id": "node1-id", @@ -80,6 +121,7 @@ The API returns the following response: "external_id" : "node1-externalId", "result" : { "is_safe": true, + "reason": "no_red_shards_on_node", "message": "" } }, @@ -89,7 +131,8 @@ The API returns the following response: "external_id" : "node2-externalId", "result" : { "is_safe": false, - "message": "node may contain a copy of a red index shard" + "reason": "red_shards_on_node", + "message": "node contains copies of the following red shards: [[indexName][0]]" } } ] diff --git a/docs/reference/commands/cli-jvm-options.asciidoc b/docs/reference/commands/cli-jvm-options.asciidoc new file mode 100644 index 000000000000..74af2c81d8d4 --- /dev/null +++ b/docs/reference/commands/cli-jvm-options.asciidoc @@ -0,0 +1,13 @@ +[[cli-tool-jvm-options-{tool-name}]] +[float] +==== JVM options + +CLI tools run with 64MB of heap. For most tools, this value is fine. However, if needed, +this can be overridden by setting the CLI_JAVA_OPTS environment variable. For example, +the following increases the heap size used by the `pass:a[{tool-name}]` tool to 1GB. + +[source,shell,subs=attributes+] +-------------------------------------------------- +export CLI_JAVA_OPTS="-Xmx1g" +bin/elasticsearch-{tool-name} ... +-------------------------------------------------- diff --git a/docs/reference/commands/node-tool.asciidoc b/docs/reference/commands/node-tool.asciidoc index 4582d9b2b7ca..e36ed42f5199 --- a/docs/reference/commands/node-tool.asciidoc +++ b/docs/reference/commands/node-tool.asciidoc @@ -50,6 +50,10 @@ This tool has a number of modes:
This may sometimes allow you to downgrade to an earlier version of {es}. +:tool-name: node +include::cli-jvm-options.asciidoc[] +:!tool-name: + [[node-tool-repurpose]] [discrete] ==== Changing the role of a node @@ -281,11 +285,13 @@ Unsafe cluster bootstrapping is only possible if there is at least one surviving master-eligible node. If there are no remaining master-eligible nodes then the cluster metadata is completely lost. However, the individual data nodes also contain a copy of the index metadata corresponding with their -shards. This sometimes allows a new cluster to import these shards as -<>. You can sometimes -recover some indices after the loss of all master-eligible nodes in a cluster -by creating a new cluster and then using the `elasticsearch-node -detach-cluster` command to move any surviving nodes into this new cluster. +shards. It is therefore sometimes possible to manually import these shards as +<>. For example you can sometimes recover some +indices after the loss of all master-eligible nodes in a cluster by creating a new +cluster and then using the `elasticsearch-node detach-cluster` command to move any +surviving nodes into this new cluster. Once the new cluster is fully formed, +use the <> to list, import or delete +any dangling indices. There is a risk of data loss when importing a dangling index because data nodes may not have the most recent copy of the index metadata and do not have any @@ -314,7 +320,9 @@ cluster`. <>. 6. Start each data node and verify that it has joined the new cluster. 7. Wait for all recoveries to have completed, and investigate the data in the -cluster to discover if any was lost during this process. +cluster to discover if any was lost during this process. Use the +<> to list, import or delete any +dangling indices. The message `Node was successfully detached from the cluster` does not mean that there has been no data loss, it just means that tool was able to complete diff --git a/docs/reference/commands/reconfigure-node.asciidoc b/docs/reference/commands/reconfigure-node.asciidoc index f06aee9f94db..838de8909bda 100644 --- a/docs/reference/commands/reconfigure-node.asciidoc +++ b/docs/reference/commands/reconfigure-node.asciidoc @@ -28,7 +28,7 @@ cluster where security features are already enabled and configured. Before starting your new node, run the <> tool with the `-s node` option to generate an enrollment token on any node in your -existing cluster. On your new node, run the the +existing cluster. On your new node, run the `elasticsearch-reconfigure-node` tool and pass the enrollment token as a parameter. @@ -54,6 +54,11 @@ nodes in an existing, secured cluster. `-v, --verbose`:: Shows verbose output. + +:tool-name: reconfigure-node +include::cli-jvm-options.asciidoc[] +:!tool-name: + [discrete] === Examples diff --git a/docs/reference/commands/shard-tool.asciidoc b/docs/reference/commands/shard-tool.asciidoc index 7dfdadbf4e2a..a2d9d557adf5 100644 --- a/docs/reference/commands/shard-tool.asciidoc +++ b/docs/reference/commands/shard-tool.asciidoc @@ -44,6 +44,10 @@ There are two ways to specify the path: * Use the `--dir` option to specify the full path to the corrupted index or translog files. 
+:tool-name: shard +include::cli-jvm-options.asciidoc[] +:!tool-name: + [discrete] ==== Removing corrupted data diff --git a/docs/reference/data-streams/downsampling-ilm.asciidoc b/docs/reference/data-streams/downsampling-ilm.asciidoc index 938d773bdec2..32499fcad0d6 100644 --- a/docs/reference/data-streams/downsampling-ilm.asciidoc +++ b/docs/reference/data-streams/downsampling-ilm.asciidoc @@ -4,8 +4,6 @@ Run downsampling with ILM ++++ -preview::[] - This is a simplified example that allows you to see quickly how <> works as part of an ILM policy to reduce the storage size of a sampled set of metrics. The example uses typical Kubernetes @@ -38,7 +36,7 @@ To enable downsampling, add a <> and set <> to the downsampling interval at which you want to aggregate the original time series data. -In this example, an ILM policy is configired for the `hot` phase. The downsample +In this example, an ILM policy is configured for the `hot` phase. The downsample takes place after the initial index rollover, which for demonstration purposes is set to run after five minutes. @@ -292,7 +290,8 @@ GET _data_stream If the ILM policy has not yet been applied, your results will be like the following. Note the original `index_name`: `.ds-datastream--000001`. -``` +[source,console-result] +---- { "data_streams": [ { @@ -329,7 +328,9 @@ following. Note the original `index_name`: `.ds-datastream--000001`. } ] } -``` +---- +// TEST[skip:todo] +// TEST[continued] Next, run a search query: @@ -341,7 +342,8 @@ GET datastream/_search The query returns your ten newly added documents. -``` +[source,console-result] +---- { "took": 17, "timed_out": false, @@ -357,7 +359,9 @@ The query returns your ten newly added documents. "relation": "eq" }, ... -``` +---- +// TEST[skip:todo] +// TEST[continued] By default, index lifecycle management checks every ten minutes for indices that meet policy criteria. Wait for about ten minutes (maybe brew up a quick coffee @@ -373,7 +377,8 @@ After the ILM policy has taken effect, the original `.ds-datastream-2022.08.26-000001` index is replaced with a new, downsampled index, in this case `downsample-6tkn-.ds-datastream-2022.08.26-000001`. -``` +[source,console-result] +---- { "data_streams": [ { @@ -392,7 +397,9 @@ index, in this case `downsample-6tkn-.ds-datastream-2022.08.26-000001`. } ], ... -``` +---- +// TEST[skip:todo] +// TEST[continued] Run a search query on the datastream. @@ -400,13 +407,14 @@ Run a search query on the datastream. ---- GET datastream/_search ---- -// TEST[skip: The @timestamp value won't match an accepted range in the TSDS] +// TEST[continued] The new downsampled index contains just one document that includes the `min`, `max`, `sum`, and `value_count` statistics based off of the original sampled metrics. -``` +[source,console-result] +---- { "took": 6, "timed_out": false, @@ -483,7 +491,9 @@ metrics. ] } } -``` +---- +// TEST[skip:todo] +// TEST[continued] Use the <> to get statistics for the data stream, including the storage size. @@ -492,9 +502,10 @@ the data stream, including the storage size. 
---- GET /_data_stream/datastream/_stats?human=true ---- -// TEST[skip: The @timestamp value won't match an accepted range in the TSDS] +// TEST[continued] -``` +[source,console-result] +---- { "_shards": { "total": 4, @@ -515,7 +526,9 @@ GET /_data_stream/datastream/_stats?human=true } ] } -``` +---- +// TEST[skip:todo] +// TEST[continued] This example demonstrates how downsampling works as part of an ILM policy to reduce the storage size of metrics data as it becomes less current and less diff --git a/docs/reference/data-streams/downsampling-manual.asciidoc b/docs/reference/data-streams/downsampling-manual.asciidoc index 311a02eb4868..475c57895fcc 100644 --- a/docs/reference/data-streams/downsampling-manual.asciidoc +++ b/docs/reference/data-streams/downsampling-manual.asciidoc @@ -4,8 +4,6 @@ Run downsampling manually ++++ -preview::[] - This is a simplified example that allows you to see quickly how <> works to reduce the storage size of a time series index. The example uses typical Kubernetes cluster monitoring data. To test out @@ -200,7 +198,6 @@ PUT /sample-01 } ---- -// TEST [discrete] [[downsampling-manual-ingest-data]] @@ -209,14 +206,13 @@ PUT /sample-01 In a terminal window with {es} running, run the following curl command to load the documents from the downloaded sample data file: -//[source,console] -//---- -``` +[source,sh] +---- curl -s -H "Content-Type: application/json" \ -XPOST http:///sample-01/_bulk?pretty \ --data-binary @sample-k8s-metrics.json -``` -//---- +---- +// NOTCONSOLE Approximately 18,000 documents are added. Check the search results for the newly ingested data: @@ -227,11 +223,12 @@ GET /sample-01*/_search ---- // TEST[continued] -The query should return the first 10,000 hits. In each document you can see the -time series dimensions (`host`, `node`, `pod` and `container`) as well as the -various CPU and memory time series metrics. +The query has at least 10,000 hits and returns the first 10. In each document +you can see the time series dimensions (`host`, `node`, `pod` and `container`) +as well as the various CPU and memory time series metrics. -``` +[source,console-result] +---- "hits": { "total": { "value": 10000, @@ -294,7 +291,9 @@ various CPU and memory time series metrics. } } ... -``` +---- +// TEST[skip:todo] +// TEST[continued] Next, run a terms aggregation on the set of time series dimensions (`_tsid`) to create a date histogram on a fixed interval of one day. @@ -393,11 +392,12 @@ GET /sample-01*/_search ---- // TEST[continued] -In the query results, notice that the numer of hits has been reduced to only 288 +In the query results, notice that the number of hits has been reduced to only 288 documents. As well, for each time series metric statistical representations have been calculated: `min`, `max`, `sum`, and `value_count`. -``` +[source,console-result] +---- "hits": { "total": { "value": 288, @@ -455,7 +455,8 @@ been calculated: `min`, `max`, `sum`, and `value_count`. } }, ... -``` +---- +// TEST[skip:todo] This example demonstrates how downsampling can dramatically reduce the number of records stored for time series data, within whatever time boundaries you choose. 
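For reference, results like those shown above come from running the downsample API against the source index. The following is a minimal sketch, assuming the `sample-01` source index from this example, an illustrative `sample-01-downsampled` target name, and a one-hour `fixed_interval`; the source index must be write-blocked before it can be downsampled.

[source,sh]
----
# Illustrative sketch: block writes on the source index, then downsample it.
# The target index name and the 1h interval are assumptions, not fixed values.
curl -X PUT "localhost:9200/sample-01/_block/write?pretty"

curl -X POST "localhost:9200/sample-01/_downsample/sample-01-downsampled?pretty" \
  -H 'Content-Type: application/json' \
  -d '{ "fixed_interval": "1h" }'
----
// NOTCONSOLE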
diff --git a/docs/reference/data-streams/downsampling.asciidoc b/docs/reference/data-streams/downsampling.asciidoc index d70cbb1da6ba..99273e1a6854 100644 --- a/docs/reference/data-streams/downsampling.asciidoc +++ b/docs/reference/data-streams/downsampling.asciidoc @@ -1,8 +1,6 @@ [[downsampling]] === Downsampling a time series data stream -preview::[] - Downsampling provides a method to reduce the footprint of your <> by storing it at reduced granularity. @@ -72,18 +70,14 @@ To downsample a time series index, use the <> and set `fixed_interval` to the level of granularity that you'd like: -``` -POST //_downsample/ -{ - "fixed_interval": "1d" -} -``` +include::../indices/downsample-data-stream.asciidoc[tag=downsample-example] To downsample time series data as part of ILM, include a <> in your ILM policy and set `fixed_interval` to the level of granularity that you'd like: -``` +[source,console] +---- PUT _ilm/policy/my_policy { "policy": { @@ -98,7 +92,7 @@ PUT _ilm/policy/my_policy } } } -``` +---- [discrete] [[querying-downsampled-indices]] diff --git a/docs/reference/data-streams/set-up-tsds.asciidoc b/docs/reference/data-streams/set-up-tsds.asciidoc index bee0d40ece4a..3c15011871f8 100644 --- a/docs/reference/data-streams/set-up-tsds.asciidoc +++ b/docs/reference/data-streams/set-up-tsds.asciidoc @@ -4,8 +4,6 @@ Set up a TSDS ++++ -preview::[] - To set up a <>, follow these steps: . Check the <>. diff --git a/docs/reference/data-streams/tsds-index-settings.asciidoc b/docs/reference/data-streams/tsds-index-settings.asciidoc index dea828091c8f..8512a3841b37 100644 --- a/docs/reference/data-streams/tsds-index-settings.asciidoc +++ b/docs/reference/data-streams/tsds-index-settings.asciidoc @@ -1,8 +1,6 @@ [[tsds-index-settings]] === Time series index settings -preview::[] - Backing indices in a <> support the following index settings. diff --git a/docs/reference/data-streams/tsds.asciidoc b/docs/reference/data-streams/tsds.asciidoc index a454e1652e39..d94754d1f0d8 100644 --- a/docs/reference/data-streams/tsds.asciidoc +++ b/docs/reference/data-streams/tsds.asciidoc @@ -1,12 +1,9 @@ [[tsds]] == Time series data stream (TSDS) -preview::[] - A time series data stream (TSDS) models timestamped metrics data as one or more time series. -// TODO: Replace XX% with actual percentage You can use a TSDS to store metrics data more efficiently. In our benchmarks, metrics data stored in a TSDS used 44% less disk space than a regular data stream. @@ -114,7 +111,8 @@ always a short encoded hash. To prevent the `_tsid` value from being overly large, {es} limits the number of dimensions for an index using the <> index setting. While you can increase this limit, the resulting document `_tsid` -value can't exceed 32KB. +value can't exceed 32KB. Additionally the field name of a dimension cannot be +longer than 512 bytes and the each dimension value can't exceed 1kb. **** [discrete] @@ -189,6 +187,9 @@ document's `_tsid` using a <> request. However, you can use the `_tsid` field in aggregations and retrieve the `_tsid` value in searches using the <>. +WARNING: The format of the `_tsid` field shouldn't be relied upon. It may change +from version to version. + [discrete] [[time-bound-indices]] ==== Time-bound indices diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index 15814efde309..d90026073990 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -49,6 +49,9 @@ privilege. 
** To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. +** To make the result of a bulk operation visible to search using the `refresh` +parameter, you must have the `maintenance` or `manage` index privilege. + * Automatic data stream creation requires a matching index template with data stream enabled. See <>. diff --git a/docs/reference/docs/delete-by-query.asciidoc b/docs/reference/docs/delete-by-query.asciidoc index fc8e67ea0465..d671cb9b5a63 100644 --- a/docs/reference/docs/delete-by-query.asciidoc +++ b/docs/reference/docs/delete-by-query.asciidoc @@ -195,8 +195,6 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] + Defaults to `open`. -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=from] - include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=lenient] diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index 4927c84fff85..5f8b1fcc7f0c 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -141,8 +141,6 @@ If the request contains `wait_for_completion=false`, {es} performs some preflight checks, launches the request, and returns a <> you can use to cancel or get the status of the task. {es} creates a record of this task as a document at `_tasks/`. -When you are done with a task, you should delete the task document so -{es} can reclaim the space. [[docs-reindex-from-multiple-sources]] ===== Reindex from multiple sources @@ -577,6 +575,9 @@ Valid values: `index`, `create`. Defaults to `index`. IMPORTANT: To reindex to a data stream destination, this argument must be `create`. +`pipeline`::: +(Optional, string) the name of the <> to use. + `script`:: `source`::: (Optional, string) The script to run to update the document source or metadata when reindexing. diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index cd27ec369d25..5ef9d288623c 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -99,8 +99,6 @@ If the request contains `wait_for_completion=false`, {es} performs some preflight checks, launches the request, and returns a <> you can use to cancel or get the status of the task. {es} creates a record of this task as a document at `.tasks/task/${taskId}`. -When you are done with a task, you should delete the task document so -{es} can reclaim the space. ===== Waiting for active shards @@ -189,8 +187,6 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] + Defaults to `open`. -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=from] - include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=lenient] diff --git a/docs/reference/eql/eql.asciidoc b/docs/reference/eql/eql.asciidoc index 7b3234fe2464..f2191b8402c1 100644 --- a/docs/reference/eql/eql.asciidoc +++ b/docs/reference/eql/eql.asciidoc @@ -30,10 +30,16 @@ describe activity that goes beyond IOCs. [[eql-required-fields]] === Required fields -To run an EQL search, the searched data stream or index must contain a -_timestamp_ and _event category_ field. By default, EQL uses the `@timestamp` -and `event.category` fields from the {ecs-ref}[Elastic Common Schema -(ECS)]. 
To use a different timestamp or event category field, see +With the exception of sample queries, EQL searches require that the searched +data stream or index contains a _timestamp_ field. By default, EQL uses the +`@timestamp` field from the {ecs-ref}[Elastic Common Schema (ECS)]. + +EQL searches also require an _event category_ field, unless you use the +<> to search for documents +without an event category field. By default, EQL uses the ECS `event.category` +field. + +To use a different timestamp or event category field, see <>. TIP: While no schema is required to use EQL, we recommend using the @@ -314,6 +320,561 @@ GET /my-data-stream/_eql/search ---- // TEST[setup:sec_logs] +[discrete] +[[eql-search-sample]] +=== Sample chronologically unordered events + +Use EQL's <> to search for events that match one or +more join keys and a set of filters. Samples are similar to sequences, but do +not return events in chronological order. In fact, sample queries can run on +data without a timestamp. Sample queries can be useful to find correlations in +events that don't always occur in the same sequence, or that occur across long +time spans. + +.Click to show the sample data used in the examples below +[%collapsible] +==== +[source,console] +---- +PUT /my-index-000001 +{ + "mappings": { + "properties": { + "ip": { + "type":"ip" + }, + "version": { + "type": "version" + }, + "missing_keyword": { + "type": "keyword" + }, + "@timestamp": { + "type": "date" + }, + "type_test": { + "type": "keyword" + }, + "@timestamp_pretty": { + "type": "date", + "format": "dd-MM-yyyy" + }, + "event_type": { + "type": "keyword" + }, + "event": { + "properties": { + "category": { + "type": "alias", + "path": "event_type" + } + } + }, + "host": { + "type": "keyword" + }, + "os": { + "type": "keyword" + }, + "bool": { + "type": "boolean" + }, + "uptime" : { + "type" : "long" + }, + "port" : { + "type" : "long" + } + } + } +} + +PUT /my-index-000002 +{ + "mappings": { + "properties": { + "ip": { + "type":"ip" + }, + "@timestamp": { + "type": "date" + }, + "@timestamp_pretty": { + "type": "date", + "format": "yyyy-MM-dd" + }, + "type_test": { + "type": "keyword" + }, + "event_type": { + "type": "keyword" + }, + "event": { + "properties": { + "category": { + "type": "alias", + "path": "event_type" + } + } + }, + "host": { + "type": "keyword" + }, + "op_sys": { + "type": "keyword" + }, + "bool": { + "type": "boolean" + }, + "uptime" : { + "type" : "long" + }, + "port" : { + "type" : "long" + } + } + } +} + +PUT /my-index-000003 +{ + "mappings": { + "properties": { + "host_ip": { + "type":"ip" + }, + "@timestamp": { + "type": "date" + }, + "date": { + "type": "date" + }, + "event_type": { + "type": "keyword" + }, + "event": { + "properties": { + "category": { + "type": "alias", + "path": "event_type" + } + } + }, + "missing_keyword": { + "type": "keyword" + }, + "host": { + "type": "keyword" + }, + "os": { + "type": "keyword" + }, + "bool": { + "type": "boolean" + }, + "uptime" : { + "type" : "long" + }, + "port" : { + "type" : "long" + } + } + } +} + +POST /my-index-000001/_bulk?refresh +{"index":{"_id":1}} +{"@timestamp":"1234567891","@timestamp_pretty":"12-12-2022","missing_keyword":"test","type_test":"abc","ip":"10.0.0.1","event_type":"alert","host":"doom","uptime":0,"port":1234,"os":"win10","version":"1.0.0","id":11} +{"index":{"_id":2}} +{"@timestamp":"1234567892","@timestamp_pretty":"13-12-2022","event_type":"alert","type_test":"abc","host":"CS","uptime":5,"port":1,"os":"win10","version":"1.2.0","id":12} 
+{"index":{"_id":3}} +{"@timestamp":"1234567893","@timestamp_pretty":"12-12-2022","event_type":"alert","type_test":"abc","host":"farcry","uptime":1,"port":1234,"bool":false,"os":"win10","version":"2.0.0","id":13} +{"index":{"_id":4}} +{"@timestamp":"1234567894","@timestamp_pretty":"13-12-2022","event_type":"alert","type_test":"abc","host":"GTA","uptime":3,"port":12,"os":"slack","version":"10.0.0","id":14} +{"index":{"_id":5}} +{"@timestamp":"1234567895","@timestamp_pretty":"17-12-2022","event_type":"alert","host":"sniper 3d","uptime":6,"port":1234,"os":"fedora","version":"20.1.0","id":15} +{"index":{"_id":6}} +{"@timestamp":"1234568896","@timestamp_pretty":"17-12-2022","event_type":"alert","host":"doom","port":65123,"bool":true,"os":"redhat","version":"20.10.0","id":16} +{"index":{"_id":7}} +{"@timestamp":"1234567897","@timestamp_pretty":"17-12-2022","missing_keyword":"yyy","event_type":"failure","host":"doom","uptime":15,"port":1234,"bool":true,"os":"redhat","version":"20.2.0","id":17} +{"index":{"_id":8}} +{"@timestamp":"1234567898","@timestamp_pretty":"12-12-2022","missing_keyword":"test","event_type":"success","host":"doom","uptime":16,"port":512,"os":"win10","version":"1.2.3","id":18} +{"index":{"_id":9}} +{"@timestamp":"1234567899","@timestamp_pretty":"15-12-2022","missing_keyword":"test","event_type":"success","host":"GTA","port":12,"bool":true,"os":"win10","version":"1.2.3","id":19} +{"index":{"_id":10}} +{"@timestamp":"1234567893","missing_keyword":null,"ip":"10.0.0.5","event_type":"alert","host":"farcry","uptime":1,"port":1234,"bool":true,"os":"win10","version":"1.2.3","id":110} + +POST /my-index-000002/_bulk?refresh +{"index":{"_id":1}} +{"@timestamp":"1234567991","type_test":"abc","ip":"10.0.0.1","event_type":"alert","host":"doom","uptime":0,"port":1234,"op_sys":"win10","id":21} +{"index":{"_id":2}} +{"@timestamp":"1234567992","type_test":"abc","event_type":"alert","host":"CS","uptime":5,"port":1,"op_sys":"win10","id":22} +{"index":{"_id":3}} +{"@timestamp":"1234567993","type_test":"abc","@timestamp_pretty":"2022-12-17","event_type":"alert","host":"farcry","uptime":1,"port":1234,"bool":false,"op_sys":"win10","id":23} +{"index":{"_id":4}} +{"@timestamp":"1234567994","event_type":"alert","host":"GTA","uptime":3,"port":12,"op_sys":"slack","id":24} +{"index":{"_id":5}} +{"@timestamp":"1234567995","event_type":"alert","host":"sniper 3d","uptime":6,"port":1234,"op_sys":"fedora","id":25} +{"index":{"_id":6}} +{"@timestamp":"1234568996","@timestamp_pretty":"2022-12-17","ip":"10.0.0.5","event_type":"alert","host":"doom","port":65123,"bool":true,"op_sys":"redhat","id":26} +{"index":{"_id":7}} +{"@timestamp":"1234567997","@timestamp_pretty":"2022-12-17","event_type":"failure","host":"doom","uptime":15,"port":1234,"bool":true,"op_sys":"redhat","id":27} +{"index":{"_id":8}} +{"@timestamp":"1234567998","ip":"10.0.0.1","event_type":"success","host":"doom","uptime":16,"port":512,"op_sys":"win10","id":28} +{"index":{"_id":9}} +{"@timestamp":"1234567999","ip":"10.0.0.1","event_type":"success","host":"GTA","port":12,"bool":false,"op_sys":"win10","id":29} + +POST /my-index-000003/_bulk?refresh +{"index":{"_id":1}} +{"@timestamp":"1334567891","host_ip":"10.0.0.1","event_type":"alert","host":"doom","uptime":0,"port":12,"os":"win10","id":31} +{"index":{"_id":2}} +{"@timestamp":"1334567892","event_type":"alert","host":"CS","os":"win10","id":32} +{"index":{"_id":3}} +{"@timestamp":"1334567893","event_type":"alert","host":"farcry","bool":true,"os":"win10","id":33} +{"index":{"_id":4}} 
+{"@timestamp":"1334567894","event_type":"alert","host":"GTA","os":"slack","bool":true,"id":34} +{"index":{"_id":5}} +{"@timestamp":"1234567895","event_type":"alert","host":"sniper 3d","os":"fedora","id":35} +{"index":{"_id":6}} +{"@timestamp":"1234578896","host_ip":"10.0.0.1","event_type":"alert","host":"doom","bool":true,"os":"redhat","id":36} +{"index":{"_id":7}} +{"@timestamp":"1234567897","event_type":"failure","missing_keyword":"test","host":"doom","bool":true,"os":"redhat","id":37} +{"index":{"_id":8}} +{"@timestamp":"1234577898","event_type":"success","host":"doom","os":"win10","id":38,"date":"1671235200000"} +{"index":{"_id":9}} +{"@timestamp":"1234577899","host_ip":"10.0.0.5","event_type":"success","host":"GTA","bool":true,"os":"win10","id":39} +---- +==== + +A sample query specifies at least one join key, using the <>, and up to five filters: + +[source,console] +---- +GET /my-index*/_eql/search +{ + "query": """ + sample by host + [any where uptime > 0] + [any where port > 100] + [any where bool == true] + """ +} +---- +// TEST[continued] + +By default, the response’s `hits.sequences` property contains up to 10 samples. +Each sample has a set of `join_keys` and an array with one matching event for +each of the filters. Events are returned in the order of the filters they match: + +[source,console-result] +---- +{ + ... + "hits": { + "total": { + "value": 2, + "relation": "eq" + }, + "sequences": [ + { + "join_keys": [ + "doom" <1> + ], + "events": [ + { <2> + "_index": "my-index-000001", + "_id": "7", + "_source": { + "@timestamp": "1234567897", + "@timestamp_pretty": "17-12-2022", + "missing_keyword": "yyy", + "event_type": "failure", + "host": "doom", + "uptime": 15, + "port": 1234, + "bool": true, + "os": "redhat", + "version": "20.2.0", + "id": 17 + } + }, + { <3> + "_index": "my-index-000001", + "_id": "1", + "_source": { + "@timestamp": "1234567891", + "@timestamp_pretty": "12-12-2022", + "missing_keyword": "test", + "type_test": "abc", + "ip": "10.0.0.1", + "event_type": "alert", + "host": "doom", + "uptime": 0, + "port": 1234, + "os": "win10", + "version": "1.0.0", + "id": 11 + } + }, + { <4> + "_index": "my-index-000001", + "_id": "6", + "_source": { + "@timestamp": "1234568896", + "@timestamp_pretty": "17-12-2022", + "event_type": "alert", + "host": "doom", + "port": 65123, + "bool": true, + "os": "redhat", + "version": "20.10.0", + "id": 16 + } + } + ] + }, + { + "join_keys": [ + "farcry" <5> + ], + "events": [ + { + "_index": "my-index-000001", + "_id": "3", + "_source": { + "@timestamp": "1234567893", + "@timestamp_pretty": "12-12-2022", + "event_type": "alert", + "type_test": "abc", + "host": "farcry", + "uptime": 1, + "port": 1234, + "bool": false, + "os": "win10", + "version": "2.0.0", + "id": 13 + } + }, + { + "_index": "my-index-000001", + "_id": "10", + "_source": { + "@timestamp": "1234567893", + "missing_keyword": null, + "ip": "10.0.0.5", + "event_type": "alert", + "host": "farcry", + "uptime": 1, + "port": 1234, + "bool": true, + "os": "win10", + "version": "1.2.3", + "id": 110 + } + }, + { + "_index": "my-index-000003", + "_id": "3", + "_source": { + "@timestamp": "1334567893", + "event_type": "alert", + "host": "farcry", + "bool": true, + "os": "win10", + "id": 33 + } + } + ] + } + ] + } +} +---- +// TESTRESPONSE[s/ \.\.\.\n/"is_partial": false, "is_running": false, "took": $body.took, "timed_out": false,/] + +<1> The events in the first sample have a value of `doom` for `host`. +<2> This event matches the first filter. 
+<3> This event matches the second filter. +<4> This event matches the third filter. +<5> The events in the second sample have a value of `farcry` for `host`. + +You can specify multiple join keys: + +[source,console] +---- +GET /my-index*/_eql/search +{ + "query": """ + sample by host + [any where uptime > 0] by os + [any where port > 100] by op_sys + [any where bool == true] by os + """ +} +---- +// TEST[continued] + +This query will return samples where each of the events shares the same value +for `os` or `op_sys`, as well as for `host`. For example: + +[source,console-result] +---- +{ + ... + "hits": { + "total": { + "value": 2, + "relation": "eq" + }, + "sequences": [ + { + "join_keys": [ + "doom", <1> + "redhat" + ], + "events": [ + { + "_index": "my-index-000001", + "_id": "7", + "_source": { + "@timestamp": "1234567897", + "@timestamp_pretty": "17-12-2022", + "missing_keyword": "yyy", + "event_type": "failure", + "host": "doom", + "uptime": 15, + "port": 1234, + "bool": true, + "os": "redhat", + "version": "20.2.0", + "id": 17 + } + }, + { + "_index": "my-index-000002", + "_id": "6", + "_source": { + "@timestamp": "1234568996", + "@timestamp_pretty": "2022-12-17", + "ip": "10.0.0.5", + "event_type": "alert", + "host": "doom", + "port": 65123, + "bool": true, + "op_sys": "redhat", + "id": 26 + } + }, + { + "_index": "my-index-000001", + "_id": "6", + "_source": { + "@timestamp": "1234568896", + "@timestamp_pretty": "17-12-2022", + "event_type": "alert", + "host": "doom", + "port": 65123, + "bool": true, + "os": "redhat", + "version": "20.10.0", + "id": 16 + } + } + ] + }, + { + "join_keys": [ + "farcry", + "win10" + ], + "events": [ + { + "_index": "my-index-000001", + "_id": "3", + "_source": { + "@timestamp": "1234567893", + "@timestamp_pretty": "12-12-2022", + "event_type": "alert", + "type_test": "abc", + "host": "farcry", + "uptime": 1, + "port": 1234, + "bool": false, + "os": "win10", + "version": "2.0.0", + "id": 13 + } + }, + { + "_index": "my-index-000002", + "_id": "3", + "_source": { + "@timestamp": "1234567993", + "type_test": "abc", + "@timestamp_pretty": "2022-12-17", + "event_type": "alert", + "host": "farcry", + "uptime": 1, + "port": 1234, + "bool": false, + "op_sys": "win10", + "id": 23 + } + }, + { + "_index": "my-index-000001", + "_id": "10", + "_source": { + "@timestamp": "1234567893", + "missing_keyword": null, + "ip": "10.0.0.5", + "event_type": "alert", + "host": "farcry", + "uptime": 1, + "port": 1234, + "bool": true, + "os": "win10", + "version": "1.2.3", + "id": 110 + } + } + ] + } + ] + } +} +---- +// TESTRESPONSE[s/ \.\.\.\n/"is_partial": false, "is_running": false, "took": $body.took, "timed_out": false,/] + +<1> The events in this sample have a value of `doom` for `host` and a value of +`redhat` for `os` or `op_sys`. + +By default, the response of a sample query contains up to 10 samples, with one +sample per unique set of join keys. Use the `size` parameter to get a smaller or +larger set of samples. To retrieve more than one sample per set of join keys, +use the `max_samples_per_key` parameter. Pipes are not supported for sample +queries. + +[source,console] +---- +GET /my-index*/_eql/search +{ + "max_samples_per_key": 2, <1> + "size": 20, <2> + "query": """ + sample + [any where uptime > 0] by host,os + [any where port > 100] by host,op_sys + [any where bool == true] by host,os + """ +} +---- +// TEST[continued] + +<1> Retrieve up to 2 samples per set of join keys. +<2> Retrieve up to 20 samples in total. 
+ [discrete] [[retrieve-selected-fields]] === Retrieve selected fields diff --git a/docs/reference/eql/syntax.asciidoc b/docs/reference/eql/syntax.asciidoc index 8fd06ad893c0..1893f6d82e6c 100644 --- a/docs/reference/eql/syntax.asciidoc +++ b/docs/reference/eql/syntax.asciidoc @@ -792,6 +792,47 @@ sequence [ library where process.name == "regsvr32.exe" ] by dll.path with runs=3 ---- +[discrete] +[[eql-samples]] +=== Samples + +You can use EQL samples to describe and match a chronologically unordered series +of events. All events in a sample share the same value for one or more fields +that are specified using the <> (join keys). Each +item in a sample is an event category and event condition, surrounded by square +brackets (`[ ]`). Events are listed in the order of the filters they match. + +[source,eql] +---- +sample by join_key + [ event_category_1 where condition_1 ] + [ event_category_2 where condition_2 ] + ... +---- + +*Example* + +The following EQL sample query returns up to 10 samples with unique values for +`host`. Each sample consists of two events: + +. Start with an event with: ++ +-- +* An event category of `file` +* A `file.extension` of `exe` +-- +. Followed by an event with an event category of `process` + +[source,eql] +---- +sample by host + [ file where file.extension == "exe" ] + [ process where true ] +---- + +Sample queries do not take into account the chronological ordering of events. +The `with maxspan` and `with runs` statements as well as the `until` keyword are +not supported. + [discrete] [[eql-functions]] === Functions diff --git a/docs/reference/features/apis/reset-features-api.asciidoc b/docs/reference/features/apis/reset-features-api.asciidoc index 300ce166eaa1..d8ba0832cc2a 100644 --- a/docs/reference/features/apis/reset-features-api.asciidoc +++ b/docs/reference/features/apis/reset-features-api.asciidoc @@ -6,7 +6,7 @@ experimental::[] -Clears all of the the state information stored in system indices by {es} features, including the security and machine learning indices. +Clears all of the state information stored in system indices by {es} features, including the security and machine learning indices. WARNING: Intended for development and testing use only. Do not reset features on a production cluster. diff --git a/docs/reference/health/health.asciidoc b/docs/reference/health/health.asciidoc index be310ca85ea0..bcbf06559b90 100644 --- a/docs/reference/health/health.asciidoc +++ b/docs/reference/health/health.asciidoc @@ -4,18 +4,14 @@ Health ++++ -An experimental API that returns the health status of an {es} cluster. - -This API is currently experimental for internal use by Elastic software only. - -NOTE: {cloud-only} +An API that reports the health status of an {es} cluster. [[health-api-request]] ==== {api-request-title} -`GET /_internal/_health` + +`GET /_health_report` + -`GET /_internal/_health/` + +`GET /_health_report/` + [[health-api-prereqs]] ==== {api-prereq-title} @@ -26,8 +22,8 @@ NOTE: {cloud-only} [[health-api-desc]] ==== {api-description-title} -The health API returns the health status of an Elasticsearch cluster. It -returns a list of indicators that compose Elasticsearch functionality. +The health API returns a report with the health status of an Elasticsearch cluster. The report +contains a list of indicators that compose Elasticsearch functionality. Each indicator has a health status of: `green`, `unknown`, `yellow` or `red`. The indicator will provide an explanation and metadata describing the reason for its current health status. 
@@ -44,7 +40,7 @@ steps that can be performed in order to improve the health of the system. The ro steps are encapsulated in a `diagnosis`. A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed -step by step troubleshooting guide to fix the diagnosed problem. +step-by-step troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. When setting up automated polling of the API @@ -92,6 +88,12 @@ for health status set `verbose` to `false` to disable the more expensive analysi These details include additional troubleshooting metrics and sometimes a root cause analysis of a health status. Defaults to `true`. +`size`:: + (Optional, integer) The maximum number of affected resources to return. + As a diagnosis can return multiple types of affected resources this parameter will limit the number of resources returned for each type to the configured value (e.g. a diagnosis could return + `1000` affected indices and `1000` affected nodes). + Defaults to `1000`. + [role="child_attributes"] [[health-api-response-body]] ==== {api-response-body-title} @@ -194,7 +196,7 @@ for health status set `verbose` to `false` to disable the more expensive analysi `action`:: (string) A brief description the steps that should be taken to remediate the problem. - A more detailed step by step guide to remediate the problem is provided by the + A more detailed step-by-step guide to remediate the problem is provided by the `help_url` field. `affected_resources`:: @@ -203,7 +205,7 @@ for health status set `verbose` to `false` to disable the more expensive analysi diagnosis is applicable for. `help_url`:: - (string) A link to the troubleshooting guide that'll fix the healh problem. + (string) A link to the troubleshooting guide that'll fix the health problem. ======== ======= ==== @@ -212,7 +214,7 @@ for health status set `verbose` to `false` to disable the more expensive analysi [[health-api-response-details]] ==== Indicator Details -Each health indicator in the health api returns a set of details that further explains the state of the system. The +Each health indicator in the health API returns a set of details that further explains the state of the system. The details have contents and a structure that is unique to each indicator. [[health-api-response-details-master-is-stable]] @@ -380,7 +382,7 @@ watermark threshold>>. [source,console] -------------------------------------------------- -GET _internal/_health +GET _health_report -------------------------------------------------- The API returns a response with all the indicators regardless @@ -388,14 +390,14 @@ of current status. [source,console] -------------------------------------------------- -GET _internal/_health/shards_availability +GET _health_report/shards_availability -------------------------------------------------- The API returns a response for just the shard availability indicator. 
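Where a diagnosis touches a large number of resources, the `size` query parameter documented above can be used to trim the response. A minimal sketch, assuming a local cluster and an illustrative limit of 50 affected resources per type:

[source,sh]
----
# Illustrative only: cap each diagnosis at 50 affected resources of each type
# (the default is 1000). Adjust the host for your cluster.
curl -X GET "localhost:9200/_health_report?size=50&pretty"
----
// NOTCONSOLE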
[source,console] -------------------------------------------------- -GET _internal/_health?verbose=false +GET _health_report?verbose=false -------------------------------------------------- The API returns a response with all health indicators but will diff --git a/docs/reference/how-to/search-speed.asciidoc b/docs/reference/how-to/search-speed.asciidoc index 04b670c1a724..0db3ca04e99a 100644 --- a/docs/reference/how-to/search-speed.asciidoc +++ b/docs/reference/how-to/search-speed.asciidoc @@ -10,7 +10,7 @@ goes to the filesystem cache so that Elasticsearch can keep hot regions of the index in physical memory. [discrete] -tag::readahead[] +// tag::readahead[] === Avoid page cache thrashing by using modest readahead values on Linux Search can cause a lot of randomized read I/O. When the underlying block @@ -35,7 +35,7 @@ as a transient setting). We recommend a value of `128KiB` for readahead. WARNING: `blockdev` expects values in 512 byte sectors whereas `lsblk` reports values in `KiB`. As an example, to temporarily set readahead to `128KiB` for `/dev/nvme0n1`, specify `blockdev --setra 256 /dev/nvme0n1`. -end::readahead[] +// end::readahead[] [discrete] === Use faster hardware @@ -358,7 +358,7 @@ PUT index } -------------------------------------------------- -tag::warm-fs-cache[] +// tag::warm-fs-cache[] [discrete] === Warm up the filesystem cache @@ -372,7 +372,7 @@ depending on the file extension using the WARNING: Loading data into the filesystem cache eagerly on too many indices or too many files will make search _slower_ if the filesystem cache is not large enough to hold all the data. Use with caution. -end::warm-fs-cache[] +// end::warm-fs-cache[] [discrete] === Use index sorting to speed up conjunctions @@ -424,6 +424,7 @@ be able to cope with `max_failures` node failures at once at most, then the right number of replicas for you is `max(max_failures, ceil(num_nodes / num_primaries) - 1)`. +[discrete] === Tune your queries with the Search Profiler The {ref}/search-profile.html[Profile API] provides detailed information about @@ -438,6 +439,7 @@ Because the Profile API itself adds significant overhead to the query, this information is best used to understand the relative cost of the various query components. It does not provide a reliable measure of actual processing time. +[discrete] [[faster-phrase-queries]] === Faster phrase queries with `index_phrases` @@ -446,6 +448,7 @@ indexes 2-shingles and is automatically leveraged by query parsers to run phrase queries that don't have a slop. If your use-case involves running lots of phrase queries, this can speed up queries significantly. +[discrete] [[faster-prefix-queries]] === Faster prefix queries with `index_prefixes` @@ -454,6 +457,7 @@ indexes prefixes of all terms and is automatically leveraged by query parsers to run prefix queries. If your use-case involves running lots of prefix queries, this can speed up queries significantly. +[discrete] [[faster-filtering-with-constant-keyword]] === Use `constant_keyword` to speed up filtering diff --git a/docs/reference/ilm/actions/ilm-downsample.asciidoc b/docs/reference/ilm/actions/ilm-downsample.asciidoc index 6daf008cb5b9..7365e3b179d0 100644 --- a/docs/reference/ilm/actions/ilm-downsample.asciidoc +++ b/docs/reference/ilm/actions/ilm-downsample.asciidoc @@ -2,8 +2,6 @@ [[ilm-downsample]] === Downsample -preview::[] - Phases allowed: hot, warm, cold. 
Aggregates a time series (TSDS) index and stores diff --git a/docs/reference/ilm/apis/move-to-step.asciidoc b/docs/reference/ilm/apis/move-to-step.asciidoc index 9b7c448649f7..f901b2402440 100644 --- a/docs/reference/ilm/apis/move-to-step.asciidoc +++ b/docs/reference/ilm/apis/move-to-step.asciidoc @@ -96,11 +96,11 @@ The name of the phase that contains the action you want to perform or resume. `action`:: (Optional, string) -The name action you want to perform or resume. +The name action you want to perform or resume. Required if `name` used. `name`:: (Optional, string) -The name of the step to move to and execute. +The name of the step to move to and execute. Required if `action` used. ==== @@ -176,6 +176,26 @@ If the request succeeds, you receive the following result: "acknowledged": true } -------------------------------------------------- +// TEST[continued] The request will fail if the index is not in the `new` phase as specified by the `current_step`. + +The following example pushes `my-index-000001` from the end of hot phase into +the start of warm: + +[source,console] +-------------------------------------------------- +POST _ilm/move/my-index-000001 +{ + "current_step": { + "phase": "hot", + "action": "complete", + "name": "complete" + }, + "next_step": { + "phase": "warm" + } +} +-------------------------------------------------- +// TESTRESPONSE[skip: can't consistently get ILM in a completed hot phase during CI checks] \ No newline at end of file diff --git a/docs/reference/ilm/ilm-and-snapshots.asciidoc b/docs/reference/ilm/ilm-and-snapshots.asciidoc index 4d50aeb11e1c..40fd5062164a 100644 --- a/docs/reference/ilm/ilm-and-snapshots.asciidoc +++ b/docs/reference/ilm/ilm-and-snapshots.asciidoc @@ -2,6 +2,11 @@ [[index-lifecycle-and-snapshots]] == Restore a managed data stream or index +To <> managed indices, ensure that the {ilm-init} +policies referenced by the indices exist. If necessary, you can restore +{ilm-init} policies by setting +<> to `true`. + When you restore a managed index or a data stream with managed backing indices, {ilm-init} automatically resumes executing the restored indices' policies. A restored index's `min_age` is relative to when it was originally created or rolled over, diff --git a/docs/reference/ilm/ilm-index-lifecycle.asciidoc b/docs/reference/ilm/ilm-index-lifecycle.asciidoc index aa97993054b6..80b5c6550421 100644 --- a/docs/reference/ilm/ilm-index-lifecycle.asciidoc +++ b/docs/reference/ilm/ilm-index-lifecycle.asciidoc @@ -87,27 +87,27 @@ actions in the order listed. 
- <> - <> - <> + - <> - <> - <> - <> - - <> * Warm - <> - <> - <> + - <> - <> - <> - <> - <> - - <> * Cold - <> - <> - <> + - <> - <> - <> - <> - <> * Frozen - <> - <> diff --git a/docs/reference/images/spatial/geogrid_h3.png b/docs/reference/images/spatial/geogrid_h3.png new file mode 100644 index 000000000000..7552104044c2 Binary files /dev/null and b/docs/reference/images/spatial/geogrid_h3.png differ diff --git a/docs/reference/images/spatial/geogrid_h3_child.png b/docs/reference/images/spatial/geogrid_h3_child.png new file mode 100644 index 000000000000..02b6a8c7dcfe Binary files /dev/null and b/docs/reference/images/spatial/geogrid_h3_child.png differ diff --git a/docs/reference/images/spatial/geogrid_h3_children.png b/docs/reference/images/spatial/geogrid_h3_children.png new file mode 100644 index 000000000000..0382bc22da7a Binary files /dev/null and b/docs/reference/images/spatial/geogrid_h3_children.png differ diff --git a/docs/reference/images/spatial/geogrid_tile.png b/docs/reference/images/spatial/geogrid_tile.png new file mode 100644 index 000000000000..aa9d2a1ea08d Binary files /dev/null and b/docs/reference/images/spatial/geogrid_tile.png differ diff --git a/docs/reference/images/spatial/geogrid_tile_child.png b/docs/reference/images/spatial/geogrid_tile_child.png new file mode 100644 index 000000000000..35284407c410 Binary files /dev/null and b/docs/reference/images/spatial/geogrid_tile_child.png differ diff --git a/docs/reference/images/spatial/geoshape_hexgrid.png b/docs/reference/images/spatial/geoshape_hexgrid.png new file mode 100644 index 000000000000..b669314555b6 Binary files /dev/null and b/docs/reference/images/spatial/geoshape_hexgrid.png differ diff --git a/docs/reference/index-custom-title-page.html b/docs/reference/index-custom-title-page.html index 0b4e05bf2742..5c006367ebd1 100644 --- a/docs/reference/index-custom-title-page.html +++ b/docs/reference/index-custom-title-page.html @@ -41,6 +41,14 @@ -moz-columns: 2; } } + + #guide h3.gtk { + margin-top: 0; + } + + .mb-4, .my-4 { + margin-bottom: 0!important; + }

@@ -68,45 +76,7 @@
 Search and analyze your data
-Explore by use case
-Get to know Elasticsearch
+Get to know Elasticsearch
@@ -229,5 +199,42 @@
+Explore by use case
+View all Elastic docs
diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index e258d8a74cc2..d5fc1fef7f33 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -320,7 +320,7 @@ Defaults to `*`, which matches all fields eligible for Default <> for the index. Index requests will fail if the default pipeline is set and the pipeline does not exist. The default may be overridden using the `pipeline` parameter. The special pipeline name `_none` indicates - no ingest pipeline should be run. + no default ingest pipeline will run. [[index-final-pipeline]] `index.final_pipeline`:: @@ -328,7 +328,7 @@ Final <> for the index. Indexing requests will fail if the final pipeline is set and the pipeline does not exist. The final pipeline always runs after the request pipeline (if specified) and the default pipeline (if it exists). The special pipeline name `_none` -indicates no ingest pipeline will run. +indicates no final ingest pipeline will run. + NOTE: You can't use a final pipeline to change the `_index` field. If the pipeline attempts to change the `_index` field, the indexing request will fail. diff --git a/docs/reference/indices/downsample-data-stream.asciidoc b/docs/reference/indices/downsample-data-stream.asciidoc index 425fd76472bf..8226c365dd50 100644 --- a/docs/reference/indices/downsample-data-stream.asciidoc +++ b/docs/reference/indices/downsample-data-stream.asciidoc @@ -5,8 +5,6 @@ Downsample ++++ -preview::[] - Aggregates a time series (TSDS) index and stores pre-computed statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. For example, @@ -14,6 +12,7 @@ a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. All documents within an hour interval are summarized and stored as a single document in the downsample index. +// tag::downsample-example[] //// [source,console] ---- @@ -74,6 +73,7 @@ DELETE _index_template/* ---- // TEST[continued] //// +// end::downsample-example[] [[downsample-api-request]] ==== {api-request-title} diff --git a/docs/reference/indices/ignore-missing-component-templates.asciidoc b/docs/reference/indices/ignore-missing-component-templates.asciidoc new file mode 100644 index 000000000000..8337be779c70 --- /dev/null +++ b/docs/reference/indices/ignore-missing-component-templates.asciidoc @@ -0,0 +1,95 @@ +[[ignore_missing_component_templates]] +== Config ignore_missing_component_templates + +The configuration option `ignore_missing_component_templates` can be used when an index template references a component template that might not exist. Every time a data stream is created based on the index template, the existence of the component template will be checked. If it exists, it will used to form the index's composite settings. If it does not exist, it is ignored. + +=== Usage example + +In the following, one component template and an index template are created. The index template references two component templates, but only the `@package` one exists. + + +Create the component template `logs-foo_component1`. 
This has to be created before the index template as it is not optional: + +[source,console] +---- +PUT _component_template/logs-foo_component1 +{ + "template": { + "mappings": { + "properties": { + "host.name": { + "type": "keyword" + } + } + } + } +} +---- + +Next, the index template will be created and it references two component templates: + +[source,JSON] +---- + "composed_of": ["logs-foo_component1", "logs-foo_component2"] +---- + +Before, only the `logs-foo_component1` compontent template was created, meaning the `logs-foo_component2` is missing. Because of this the following entry was added to the config: + +[source,JSON] +---- + "ignore_missing_component_templates": ["logs-foo_component2"], +---- + +During creation of the template, it will not validate that `logs-foo_component2` exists: + + +[source,console] +---- +PUT _index_template/logs-foo +{ + "index_patterns": ["logs-foo-*"], + "data_stream": { }, + "composed_of": ["logs-foo_component1", "logs-foo_component2"], + "ignore_missing_component_templates": ["logs-foo_component2"], + "priority": 500 +} +---- +// TEST[continued] + +The index template `logs-foo` was successfully created. A data stream can be created based on this template: + +[source,console] +---- +PUT _data_stream/logs-foo-bar +---- +// TEST[continued] + +Looking at the mappings of the data stream, it will contain the `host.name` field. + +At a later stage, the missing component template might be added: + +[source,console] +---- +PUT _component_template/logs-foo_component2 +{ + "template": { + "mappings": { + "properties": { + "host.ip": { + "type": "ip" + } + } + } + } +} +---- +// TEST[continued] + +This will not have an immediate effect on the data stream. The mapping `host.ip` will only show up in the data stream mappings when the data stream is rolled over automatically next time or a manual rollover is triggered: + +[source,console] +---- +POST logs-foo-bar/_rollover +---- +// TEST[continued] +// TEST[teardown:data_stream_cleanup] diff --git a/docs/reference/indices/index-templates.asciidoc b/docs/reference/indices/index-templates.asciidoc index 8a4c985970b2..6128ab48998f 100644 --- a/docs/reference/indices/index-templates.asciidoc +++ b/docs/reference/indices/index-templates.asciidoc @@ -161,3 +161,5 @@ DELETE _component_template/component_template1 //// include::simulate-multi-component-templates.asciidoc[] + +include::ignore-missing-component-templates.asciidoc[] diff --git a/docs/reference/indices/recovery.asciidoc b/docs/reference/indices/recovery.asciidoc index 4a61155175d4..81b3aa13580c 100644 --- a/docs/reference/indices/recovery.asciidoc +++ b/docs/reference/indices/recovery.asciidoc @@ -4,10 +4,9 @@ Index recovery ++++ - -Returns information about ongoing and completed shard recoveries for one or more -indices. For data streams, the API returns information for the stream's backing -indices. +Returns information about ongoing and completed shard recoveries for one or +more indices. For data streams, the API returns information for the stream's +backing indices. [source,console] ---- @@ -33,14 +32,14 @@ index, or alias. [[index-recovery-api-desc]] ==== {api-description-title} -Use the index recovery API -to get information about ongoing and completed shard recoveries. +Use the index recovery API to get information about ongoing and completed shard +recoveries. // tag::shard-recovery-desc[] -Shard recovery is the process -of syncing a replica shard from a primary shard. -Upon completion, -the replica shard is available for search. 
+Shard recovery is the process of initializing a shard copy, such as restoring a +primary shard from a snapshot or syncing a replica shard from a primary shard. +When a shard recovery completes, the recovered shard is available for search +and indexing. Recovery automatically occurs during the following processes: @@ -52,6 +51,14 @@ Recovery automatically occurs during the following processes: <> operation. // end::shard-recovery-desc[] +The index recovery API reports information about completed recoveries only for +shard copies that currently exist in the cluster. It only reports the last +recovery for each shard copy and does not report historical information about +earlier recoveries, nor does it report information about the recoveries of +shard copies that no longer exist. This means that if a shard copy completes a +recovery and then {es} relocates it onto a different node then the information +about the original recovery will not be shown in the recovery API. + [[index-recovery-api-path-params]] ==== {api-path-parms-title} diff --git a/docs/reference/indices/shard-stores.asciidoc b/docs/reference/indices/shard-stores.asciidoc index ad50a218fa03..99277a8bf652 100644 --- a/docs/reference/indices/shard-stores.asciidoc +++ b/docs/reference/indices/shard-stores.asciidoc @@ -160,7 +160,8 @@ The API returns the following response: "transport_address": "local[1]", "external_id": "node_t0", "attributes": {}, - "roles": [...] + "roles": [...], + "version": "8.7.0" }, "allocation_id": "2iNySv_OQVePRX-yaRH_lQ", <4> "allocation" : "primary|replica|unused" <5> @@ -178,6 +179,7 @@ The API returns the following response: // TESTRESPONSE[s/: "[^"]*"/: $body.$_path/] // TESTRESPONSE[s/"attributes": \{[^}]*\}/"attributes": $body.$_path/] // TESTRESPONSE[s/"roles": \[[^]]*\]/"roles": $body.$_path/] +// TESTRESPONSE[s/"8.7.0"/\$node_version/] diff --git a/docs/reference/indices/simulate-index.asciidoc b/docs/reference/indices/simulate-index.asciidoc index 54a9b36f83d1..e86b724332d7 100644 --- a/docs/reference/indices/simulate-index.asciidoc +++ b/docs/reference/indices/simulate-index.asciidoc @@ -4,9 +4,7 @@ Simulate index ++++ -experimental[] - -Returns the index configuration that would be applied to the specified index from an +Returns the index configuration that would be applied to the specified index from an existing <>. //// diff --git a/docs/reference/ingest/processors.asciidoc b/docs/reference/ingest/processors.asciidoc index 958dd0ec9d77..89fddc0dd78d 100644 --- a/docs/reference/ingest/processors.asciidoc +++ b/docs/reference/ingest/processors.asciidoc @@ -48,6 +48,7 @@ include::processors/enrich.asciidoc[] include::processors/fail.asciidoc[] include::processors/fingerprint.asciidoc[] include::processors/foreach.asciidoc[] +include::processors/geo-grid.asciidoc[] include::processors/geoip.asciidoc[] include::processors/grok.asciidoc[] include::processors/gsub.asciidoc[] diff --git a/docs/reference/ingest/processors/circle.asciidoc b/docs/reference/ingest/processors/circle.asciidoc index f0ec60fcba90..2d46f797586b 100644 --- a/docs/reference/ingest/processors/circle.asciidoc +++ b/docs/reference/ingest/processors/circle.asciidoc @@ -12,11 +12,11 @@ Converts circle definitions of shapes to regular polygons which approximate them [options="header"] |====== | Name | Required | Default | Description -| `field` | yes | - | The string-valued field to trim whitespace from +| `field` | yes | - | The field to interpret as a circle. Either a string in WKT format or a map for GeoJSON. 
| `target_field` | no | `field` | The field to assign the polygon shape to, by default `field` is updated in-place | `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document | `error_distance` | yes | - | The difference between the resulting inscribed distance from center to side and the circle's radius (measured in meters for `geo_shape`, unit-less for `shape`) -| `shape_type` | yes | - | which field mapping type is to be used when processing the circle: `geo_shape` or `shape` +| `shape_type` | yes | - | Which field mapping type is to be used when processing the circle: `geo_shape` or `shape` include::common-options.asciidoc[] |====== diff --git a/docs/reference/ingest/processors/geo-grid.asciidoc b/docs/reference/ingest/processors/geo-grid.asciidoc new file mode 100644 index 000000000000..dc59ff09866a --- /dev/null +++ b/docs/reference/ingest/processors/geo-grid.asciidoc @@ -0,0 +1,250 @@ +[role="xpack"] +[[ingest-geo-grid-processor]] +=== Geo-grid processor +++++ +Geo-grid +++++ + +Converts geo-grid definitions of grid tiles or cells to regular bounding boxes or polygons which describe their shape. +This is useful if there is a need to interact with the tile shapes as spatially indexable fields. +For example the `geotile` field value `"4/8/3"` could be indexed as a string field, but that would not enable +any spatial operations on it. +Instead, convert it to the value +`"POLYGON ((0.0 40.979898069620134, 22.5 40.979898069620134, 22.5 55.77657301866769, 0.0 55.77657301866769, 0.0 40.979898069620134))"`, +which can be indexed as a <> field. + +[[geo-grid-processor-options]] +.geo_grid processor options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to interpret as a geo-tile. The field format is determined by the `tile_type`. +| `tile_type` | yes | - | Three tile formats are understood: `geohash`, `geotile` and `geohex`. +| `target_field` | no | `field` | The field to assign the polygon shape to, by default `field` is updated in-place. +| `parent_field` | no | - | If specified and a parent tile exists, save that tile address to this field. +| `children_field` | no | - | If specified and children tiles exist, save those tile addresses to this field as an array of strings. +| `non_children_field` | no | - | If specified and intersecting non-child tiles exist, save their addresses to this field as an array of strings. +| `precision_field` | no | - | If specified, save the tile precision (zoom) as an integer to this field. +| `ignore_missing` | no | - | If `true` and `field` does not exist, the processor quietly exits without modifying the document. +| `target_format` | no | "GeoJSON" | Which format to save the generated polygon in. Either `WKT` or `GeoJSON`. +include::common-options.asciidoc[] +|====== + +To demonstrate the usage of this ingest processor, consider an index called `geocells` +with a mapping for a field `geocell` of type `geo_shape`. 
+In order to populate that index using `geotile` and `geohex` fields, define +two ingest processors: + +[source,console] +-------------------------------------------------- +PUT geocells +{ + "mappings": { + "properties": { + "geocell": { + "type": "geo_shape" + } + } + } +} + +PUT _ingest/pipeline/geotile2shape +{ + "description": "translate rectangular z/x/y geotile to bounding box", + "processors": [ + { + "geo_grid": { + "field": "geocell", + "tile_type": "geotile" + } + } + ] +} + +PUT _ingest/pipeline/geohex2shape +{ + "description": "translate H3 cell to polygon", + "processors": [ + { + "geo_grid": { + "field": "geocell", + "tile_type": "geohex", + "target_format": "wkt" + } + } + ] +} +-------------------------------------------------- + +These two pipelines can be used to index documents into the `geocells` index. +The `geocell` field will be the string version of either a rectangular tile with format `z/x/y` or an H3 cell address, +depending on which ingest processor we use when indexing the document. +The resulting geometry will be represented and indexed as a <> field in either +http://geojson.org[GeoJSON] or the https://docs.opengeospatial.org/is/12-063r5/12-063r5.html[Well-Known Text] format. + +==== Example: Rectangular geotile with envelope in GeoJSON + +In this example a `geocell` field with a value defined in `z/x/y` format is indexed as a +http://geojson.org[GeoJSON Envelope] since the ingest-processor above was defined with default `target_format`. + +[source,console] +-------------------------------------------------- +PUT geocells/_doc/1?pipeline=geotile2shape +{ + "geocell": "4/8/5" +} + +GET geocells/_doc/1 +-------------------------------------------------- +// TEST[continued] + +The response shows how the ingest-processor has replaced the `geocell` field with an indexable `geo_shape`: + +[source,console-result] +-------------------------------------------------- +{ + "_index": "geocells", + "_id": "1", + "_version": 1, + "_seq_no": 0, + "_primary_term": 1, + "found": true, + "_source": { + "geocell": { + "type": "Envelope", + "coordinates": [ + [ 0.0, 55.77657301866769 ], + [ 22.5, 40.979898069620134 ] + ] + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_version": \d+/"_version": $body._version/ s/"_seq_no": \d+/"_seq_no": $body._seq_no/ s/"_primary_term": 1/"_primary_term": $body._primary_term/] + +image:images/spatial/geogrid_tile.png[Kibana map with showing the geotile at 4/8/5 and its four child cells] + +==== Example: Hexagonal geohex with polygon in WKT format + +In this example a `geocell` field with an H3 string address is indexed as a +https://docs.opengeospatial.org/is/12-063r5/12-063r5.html[WKT Polygon], since this ingest processor explicitly +defined the `target_format`. 
+ +[source,console] +-------------------------------------------------- +PUT geocells/_doc/1?pipeline=geohex2shape +{ + "geocell": "811fbffffffffff" +} + +GET geocells/_doc/1 +-------------------------------------------------- +// TEST[continued] + +The response shows how the ingest-processor has replaced the `geocell` field with an indexable `geo_shape`: + +[source,console-result] +-------------------------------------------------- +{ + "_index": "geocells", + "_id": "1", + "_version": 1, + "_seq_no": 0, + "_primary_term": 1, + "found": true, + "_source": { + "geocell": "POLYGON ((1.1885095294564962 49.470279179513454, 2.0265689212828875 45.18424864858389, 7.509948452934623 43.786609335802495, 12.6773177459836 46.40695743262768, 12.345747342333198 50.55427505169064, 6.259687012061477 51.964770150370896, 3.6300085578113794 50.610463307239115, 1.1885095294564962 49.470279179513454))" + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_version": \d+/"_version": $body._version/ s/"_seq_no": \d+/"_seq_no": $body._seq_no/ s/"_primary_term": 1/"_primary_term": $body._primary_term/] + +image:images/spatial/geogrid_h3.png[Kibana map with showing an H3 cell, and its seven child cells] + +==== Example: Enriched tile details + +As described in <>, +there are many other fields that can be set, which will enrich the information available. +For example, with H3 tiles there are 7 child tiles, but only the first is fully contained by the parent. +The remaining six are only partially overlapping the parent, and there exist a further six non-child tiles +that overlap the parent. +This can be investigated by adding parent and child additional fields to the ingest-processor: + +[source,console] +-------------------------------------------------- +PUT _ingest/pipeline/geohex2shape +{ + "description": "translate H3 cell to polygon with enriched fields", + "processors": [ + { + "geo_grid": { + "description": "Ingest H3 cells like '811fbffffffffff' and create polygons", + "field": "geocell", + "tile_type": "geohex", + "target_format": "wkt", + "target_field": "shape", + "parent_field": "parent", + "children_field": "children", + "non_children_field": "nonChildren", + "precision_field": "precision" + } + } + ] +} +-------------------------------------------------- + +Index the document to see a different result: + +[source,console] +-------------------------------------------------- +PUT geocells/_doc/1?pipeline=geohex2shape +{ + "geocell": "811fbffffffffff" +} + +GET geocells/_doc/1 +-------------------------------------------------- +// TEST[continued] + +The response from this index request: + +[source,console-result] +-------------------------------------------------- +{ + "_index": "geocells", + "_id": "1", + "_version": 1, + "_seq_no": 0, + "_primary_term": 1, + "found": true, + "_source": { + "parent": "801ffffffffffff", + "geocell": "811fbffffffffff", + "precision": 1, + "shape": "POLYGON ((1.1885095294564962 49.470279179513454, 2.0265689212828875 45.18424864858389, 7.509948452934623 43.786609335802495, 12.6773177459836 46.40695743262768, 12.345747342333198 50.55427505169064, 6.259687012061477 51.964770150370896, 3.6300085578113794 50.610463307239115, 1.1885095294564962 49.470279179513454))", + "children": [ + "821f87fffffffff", + "821f8ffffffffff", + "821f97fffffffff", + "821f9ffffffffff", + "821fa7fffffffff", + "821faffffffffff", + "821fb7fffffffff" + ], + "nonChildren": [ + "821ea7fffffffff", + "82186ffffffffff", + "82396ffffffffff", + "821f17fffffffff", + "821e37fffffffff", 
+ "82194ffffffffff" + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_version": \d+/"_version": $body._version/ s/"_seq_no": \d+/"_seq_no": $body._seq_no/ s/"_primary_term": 1/"_primary_term": $body._primary_term/] + +This additional information will then enable, for example, creating a visualization of the H3 cell, +its children and its intersecting non-children cells. + +image:images/spatial/geogrid_h3_children.png[Kibana map with three H3 layers: cell, children and intersecting non-children] diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index fa31a3bbe254..d39f3be82d2b 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -11,7 +11,10 @@ IPv4 or IPv6 address. By default, the processor uses the GeoLite2 City, GeoLite2 Country, and GeoLite2 ASN GeoIP2 databases from http://dev.maxmind.com/geoip/geoip2/geolite2/[MaxMind], shared under the -CC BY-SA 4.0 license. {es} automatically downloads updates for +CC BY-SA 4.0 license. It automatically downloads these databases if either +`ingest.geoip.downloader.eager.download` is set to true, or your cluster +has at least one pipeline with a `geoip` processor. {es} +automatically downloads updates for these databases from the Elastic GeoIP endpoint: https://geoip.elastic.co/v1/database. To get download statistics for these updates, use the <>. @@ -412,6 +415,13 @@ If `true`, {es} automatically downloads and manages updates for GeoIP2 databases from the `ingest.geoip.downloader.endpoint`. If `false`, {es} does not download updates and deletes all downloaded databases. Defaults to `true`. +[[ingest-geoip-downloader-eager-download]] +(<>, Boolean) +If `true`, {es} downloads GeoIP2 databases immediately, regardless of whether a +pipeline exists with a geoip processor. If `false`, {es} only begins downloading +the databases if a pipeline with a geoip processor exists or is added. Defaults +to `false`. + [[ingest-geoip-downloader-endpoint]] `ingest.geoip.downloader.endpoint`:: (<>, string) diff --git a/docs/reference/ingest/processors/json.asciidoc b/docs/reference/ingest/processors/json.asciidoc index faf93f67b854..84df028ef074 100644 --- a/docs/reference/ingest/processors/json.asciidoc +++ b/docs/reference/ingest/processors/json.asciidoc @@ -16,6 +16,7 @@ Converts a JSON string into a structured JSON object. | `add_to_root` | no | false | Flag that forces the parsed JSON to be added at the top level of the document. `target_field` must not be set when this option is chosen. | `add_to_root_conflict_strategy` | no | `replace` | When set to `replace`, root fields that conflict with fields from the parsed JSON will be overridden. When set to `merge`, conflicting fields will be merged. Only applicable if `add_to_root` is set to `true`. | `allow_duplicate_keys` | no | false | When set to `true`, the JSON parser will not fail if the JSON contains duplicate keys. Instead, the last encountered value for any duplicate key wins. +| `strict_json_parsing` | no | true | When set to `true`, the JSON parser will strictly parse the field value. When set to `false`, the JSON parser will be more lenient but also more likely to drop parts of the field value. For example if `strict_json_parsing` is set to `true` and the field value is `123 "foo"` then the processor will throw an IllegalArgumentException. But if `strict_json_parsing` is set to `false` then the field value will be parsed as `123`. 
include::common-options.asciidoc[] |====== diff --git a/docs/reference/ingest/processors/remove.asciidoc b/docs/reference/ingest/processors/remove.asciidoc index 6e9b4f24ff51..c3aa42a8f180 100644 --- a/docs/reference/ingest/processors/remove.asciidoc +++ b/docs/reference/ingest/processors/remove.asciidoc @@ -13,6 +13,7 @@ Removes existing fields. If one field doesn't exist, an exception will be thrown | Name | Required | Default | Description | `field` | yes | - | Fields to be removed. Supports <>. | `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +| `keep` | no | - | Fields to be kept. When set, all fields other than those specified are removed. include::common-options.asciidoc[] |====== @@ -39,3 +40,15 @@ To remove multiple fields, you can use the following query: } -------------------------------------------------- // NOTCONSOLE + +You can also choose to remove all fields other than a specified list: + +[source,js] +-------------------------------------------------- +{ + "remove": { + "keep": ["url"] + } +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/mapping/fields/synthetic-source.asciidoc b/docs/reference/mapping/fields/synthetic-source.asciidoc index 50a9ad315129..60b81c5f813a 100644 --- a/docs/reference/mapping/fields/synthetic-source.asciidoc +++ b/docs/reference/mapping/fields/synthetic-source.asciidoc @@ -1,5 +1,12 @@ [[synthetic-source]] -==== Synthetic `_source` preview:[] +==== Synthetic `_source` + +IMPORTANT: Synthetic `_source` is Generally Available only for TSDB indices +(indices that have `index.mode` set to `time_series`). For other indices +synthetic `_source` is in technical preview. Features in technical preview may +be changed or removed in a future release. Elastic will apply best effort to fix +any issues, but features in technical preview are not subject to the support SLA +of official GA features. Though very handy to have around, the source field takes up a significant amount of space on disk. Instead of storing source documents on disk exactly as you @@ -25,6 +32,8 @@ space. There are a couple of restrictions to be aware of: * When you retrieve synthetic `_source` content it undergoes minor <> compared to the original JSON. +* The `params._source` is unavailable in scripts. Instead use the +{painless}/painless-field-context.html[`doc`] API or the <>. * Synthetic `_source` can be used with indices that contain only these field types: diff --git a/docs/reference/mapping/params/index.asciidoc b/docs/reference/mapping/params/index.asciidoc index fbfd6f11d839..08cc4ece23bb 100644 --- a/docs/reference/mapping/params/index.asciidoc +++ b/docs/reference/mapping/params/index.asciidoc @@ -2,6 +2,12 @@ === `index` The `index` option controls whether field values are indexed. It accepts `true` -or `false` and defaults to `true`. Fields that are not indexed are typically -not queryable. +or `false` and defaults to `true`. + +Indexing a field creates data structures that enable the field to be queried +efficiently. <>, <>, +the <>, <>, <> and the +<> can also be queried when they are not <> + but only have doc values enabled. Queries on these fields are slow as a full scan + of the index has to be made. All other fields are not queryable. 
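+
+For example, the following minimal sketch (the index and field names here are
+illustrative, not part of this change) maps a `long` field with `index` disabled;
+a `range` query on it still works because doc values are enabled by default for
+numeric fields, but it runs as a slow scan over the doc values rather than using
+an index:
+
+[source,console]
+--------------------------------------------------
+PUT my-index-000001
+{
+  "mappings": {
+    "properties": {
+      "response_size": {
+        "type": "long",
+        "index": false <1>
+      }
+    }
+  }
+}
+
+GET my-index-000001/_search
+{
+  "query": {
+    "range": {
+      "response_size": { "gte": 1024 }
+    }
+  }
+}
+--------------------------------------------------
+<1> The field is not indexed, but its doc values still allow it to be queried,
+albeit slowly.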
diff --git a/docs/reference/mapping/runtime.asciidoc b/docs/reference/mapping/runtime.asciidoc index fd5f1a01cb71..5e3a61277109 100644 --- a/docs/reference/mapping/runtime.asciidoc +++ b/docs/reference/mapping/runtime.asciidoc @@ -34,8 +34,8 @@ data into the Elastic Stack and access it right away. When you define a runtime field, you can immediately use it in search requests, aggregations, filtering, and sorting. -If you make a runtime field an indexed field, you don't need to modify any -queries that refer to the runtime field. Better yet, you can refer to some +If you change a runtime field into an indexed field, you don't need to modify +any queries that refer to the runtime field. Better yet, you can refer to some indices where the field is a runtime field, and other indices where the field is an indexed field. You have the flexibility to choose which fields to index and which ones to keep as runtime fields. @@ -53,7 +53,7 @@ combined use less resources and reduce your operating costs. Runtime fields can replace many of the ways you can use scripting with the `_search` API. How you use a runtime field is impacted by the number of documents that the included script runs against. For example, if you're using -the `fields` parameter on the `_search` API to +the `fields` parameter on the `_search` API to <>, the script runs only against the top hits just like script fields do. @@ -77,7 +77,7 @@ If you move a script from any of these sections in a search request to a runtime field that is computing values from the same number of documents, the performance should be about the same. The performance for these features is largely dependent upon the calculations that the included script is running and -how many documents the script runs against. +how many documents the script runs against. [discrete] [[runtime-compromises]] @@ -88,9 +88,9 @@ the runtime script. To balance search performance and flexibility, index fields that you'll frequently search for and filter on, such as a timestamp. {es} automatically -uses these indexed fields first when running a query, resulting in a fast -response time. You can then use runtime fields to limit the number of fields -that {es} needs to calculate values for. Using indexed fields in tandem with +uses these indexed fields first when running a query, resulting in a fast +response time. You can then use runtime fields to limit the number of fields +that {es} needs to calculate values for. Using indexed fields in tandem with runtime fields provides flexibility in the data that you index and how you define queries for other fields. @@ -111,7 +111,7 @@ You map runtime fields by adding a `runtime` section under the mapping definition and defining <>. This script has access to the entire context of a document, including the original `_source` via `params._source` -and any mapped fields plus their values. At query time, the script runs and +and any mapped fields plus their values. At query time, the script runs and generates values for each scripted field that is required for the query. .Emitting runtime field values @@ -227,6 +227,16 @@ with `params._source` (such as `params._source.day_of_week`). For simplicity, defining a runtime field in the mapping definition without a script is the recommended option, whenever possible. +[[runtime-errorhandling]] +==== Ignoring script errors on runtime fields + +Scripts can throw errors at runtime, e.g. on accessing missing or invalid values +in documents or because of performing invalid operations. 
The `on_script_error` +parameter can be used to control error behaviour when this happens. Setting this +parameter to `continue` will have the effect of silently ignoring all errors on +this runtime field. The default `fail` value will cause a shard failure which +gets reported in the search response. + [[runtime-updating-scripts]] ==== Updating and removing runtime fields @@ -932,14 +942,14 @@ can define runtime fields in the decide to index a runtime field for greater performance, just move the full runtime field definition (including the script) to the context of an index mapping. {es} automatically uses these indexed fields to drive queries, -resulting in a fast response time. This capability means you can write a +resulting in a fast response time. This capability means you can write a script only once, and apply it to any context that supports runtime fields. NOTE: Indexing a `composite` runtime field is currently not supported. -You can then use runtime fields to limit the number of fields that {es} needs -to calculate values for. Using indexed fields in tandem with runtime fields -provides flexibility in the data that you index and how you define queries for +You can then use runtime fields to limit the number of fields that {es} needs +to calculate values for. Using indexed fields in tandem with runtime fields +provides flexibility in the data that you index and how you define queries for other fields. IMPORTANT: After indexing a runtime field, you cannot update the included @@ -1417,9 +1427,9 @@ GET my-index-000001/_search [[runtime-examples-grok-composite]] ==== Define a composite runtime field -You can also define a _composite_ runtime field to emit multiple fields from a -single script. You can define a set of typed subfields and emit a map of -values. At search time, each subfield retrieves the value associated with +You can also define a _composite_ runtime field to emit multiple fields from a +single script. You can define a set of typed subfields and emit a map of +values. At search time, each subfield retrieves the value associated with their name in the map. This means that you only need to specify your grok pattern one time and can return multiple values: @@ -1467,11 +1477,11 @@ GET my-index-000001/_search ---- // TEST[continued] -The API returns the following result. Because `http` is a `composite` runtime +The API returns the following result. Because `http` is a `composite` runtime field, the response includes each of the sub-fields under `fields`, including -any associated values that match the query. Without building your data structure +any associated values that match the query. Without building your data structure in advance, you can search and explore your data in meaningful ways to -experiment and determine which fields to index. +experiment and determine which fields to index. [source,console-result] ---- diff --git a/docs/reference/mapping/types/aggregate-metric-double.asciidoc b/docs/reference/mapping/types/aggregate-metric-double.asciidoc index 9df58119e8a3..7cbfaff5bc5f 100644 --- a/docs/reference/mapping/types/aggregate-metric-double.asciidoc +++ b/docs/reference/mapping/types/aggregate-metric-double.asciidoc @@ -92,9 +92,11 @@ aggregation returns the sum of the values of all `value_count` sub-fields. and `value_count` metrics. To run an `avg` aggregation, the field must contain both `sum` and `value_count` metric sub-field. 
-If you use an `aggregate_metric_double` field with other aggregations, the field -uses the `default_metric` value, which behaves as a `double` field. The -`default_metric` is also used in scripts and the following queries: +Running any other aggregation on an `aggregate_metric_double` field will fail with +an "unsupported aggregation" error. + +Finally, an `aggregate_metric_double` field supports the following queries for which +it behaves as a `double` by delegating its behavior to its `default_metric` sub-field: * <> * <> @@ -248,7 +250,15 @@ The search returns the following hit. The value of the `default_metric` field, // TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,/] [[aggregate-metric-double-synthetic-source]] -==== Synthetic `_source` preview:[] +==== Synthetic `_source` + +IMPORTANT: Synthetic `_source` is Generally Available only for TSDB indices +(indices that have `index.mode` set to `time_series`). For other indices +synthetic `_source` is in technical preview. Features in technical preview may +be changed or removed in a future release. Elastic will apply best effort to fix +any issues, but features in technical preview are not subject to the support SLA +of official GA features. + `aggregate_metric-double` fields support <> in their default configuration. Synthetic `_source` cannot be used together with <>. diff --git a/docs/reference/mapping/types/boolean.asciidoc b/docs/reference/mapping/types/boolean.asciidoc index c1347866e406..8b62cecd9d52 100644 --- a/docs/reference/mapping/types/boolean.asciidoc +++ b/docs/reference/mapping/types/boolean.asciidoc @@ -216,7 +216,15 @@ The following parameters are accepted by `boolean` fields: Metadata about the field. [[boolean-synthetic-source]] -==== Synthetic `_source` preview:[] +==== Synthetic `_source` + +IMPORTANT: Synthetic `_source` is Generally Available only for TSDB indices +(indices that have `index.mode` set to `time_series`). For other indices +synthetic `_source` is in technical preview. Features in technical preview may +be changed or removed in a future release. Elastic will apply best effort to fix +any issues, but features in technical preview are not subject to the support SLA +of official GA features. + `boolean` fields support <> in their default configuration. Synthetic `_source` cannot be used together with <> or with <> disabled. diff --git a/docs/reference/mapping/types/date.asciidoc b/docs/reference/mapping/types/date.asciidoc index eb5aeee9e8a1..4d69ddfd517c 100644 --- a/docs/reference/mapping/types/date.asciidoc +++ b/docs/reference/mapping/types/date.asciidoc @@ -10,9 +10,6 @@ JSON doesn't have a date data type, so dates in Elasticsearch can either be: * a number representing _milliseconds-since-the-epoch_. * a number representing _seconds-since-the-epoch_ (<>). -NOTE: Values for _milliseconds-since-the-epoch_ must be non-negative. Use a -formatted date to represent dates before 1970. - Internally, dates are converted to UTC (if the time-zone is specified) and stored as a long number representing milliseconds-since-the-epoch. @@ -232,7 +229,15 @@ Which will reply with a date like: ---- [[date-synthetic-source]] -==== Synthetic `_source` preview:[] +==== Synthetic `_source` + +IMPORTANT: Synthetic `_source` is Generally Available only for TSDB indices +(indices that have `index.mode` set to `time_series`). For other indices +synthetic `_source` is in technical preview. Features in technical preview may +be changed or removed in a future release. 
Elastic will apply best effort to fix +any issues, but features in technical preview are not subject to the support SLA +of official GA features. + `date` fields support <> in their default configuration. Synthetic `_source` cannot be used together with <>, <> set to true diff --git a/docs/reference/mapping/types/date_nanos.asciidoc b/docs/reference/mapping/types/date_nanos.asciidoc index 493a08dda74f..8f3af831e2b7 100644 --- a/docs/reference/mapping/types/date_nanos.asciidoc +++ b/docs/reference/mapping/types/date_nanos.asciidoc @@ -141,7 +141,15 @@ field. This limitation also affects <>. --- [[date-nanos-synthetic-source]] -==== Synthetic `_source` preview:[] +==== Synthetic `_source` + +IMPORTANT: Synthetic `_source` is Generally Available only for TSDB indices +(indices that have `index.mode` set to `time_series`). For other indices +synthetic `_source` is in technical preview. Features in technical preview may +be changed or removed in a future release. Elastic will apply best effort to fix +any issues, but features in technical preview are not subject to the support SLA +of official GA features. + `date_nanos` fields support <> in their default configuration. Synthetic `_source` cannot be used together with <>, <> set to true diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc index 198fbf1f928e..1f9a872ca0bc 100644 --- a/docs/reference/mapping/types/dense-vector.asciidoc +++ b/docs/reference/mapping/types/dense-vector.asciidoc @@ -51,8 +51,6 @@ It is not possible to store multiple values in one `dense_vector` field. [[index-vectors-knn-search]] ==== Index vectors for kNN search -experimental::[] - include::{es-repo-dir}/search/search-your-data/knn-search.asciidoc[tag=knn-def] Dense vector fields can be used to rank documents in @@ -195,5 +193,13 @@ neighbors for each new node. Defaults to `100`. ==== [[dense-vector-synthetic-source]] -==== Synthetic `_source` preview:[] +==== Synthetic `_source` + +IMPORTANT: Synthetic `_source` is Generally Available only for TSDB indices +(indices that have `index.mode` set to `time_series`). For other indices +synthetic `_source` is in technical preview. Features in technical preview may +be changed or removed in a future release. Elastic will apply best effort to fix +any issues, but features in technical preview are not subject to the support SLA +of official GA features. + `dense_vector` fields support <> . diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc index 3f08234a98b0..47996b8e4822 100644 --- a/docs/reference/mapping/types/geo-point.asciidoc +++ b/docs/reference/mapping/types/geo-point.asciidoc @@ -208,7 +208,15 @@ def lon = doc['location'].lon; -------------------------------------------------- [[geo-point-synthetic-source]] -==== Synthetic source preview:[] +==== Synthetic source + +IMPORTANT: Synthetic `_source` is Generally Available only for TSDB indices +(indices that have `index.mode` set to `time_series`). For other indices +synthetic `_source` is in technical preview. Features in technical preview may +be changed or removed in a future release. Elastic will apply best effort to fix +any issues, but features in technical preview are not subject to the support SLA +of official GA features. + `geo_point` fields support <> in their default configuration. 
Synthetic `_source` cannot be used together with <>, <>, or with diff --git a/docs/reference/mapping/types/histogram.asciidoc b/docs/reference/mapping/types/histogram.asciidoc index 703cce6c83d0..fa95c4e6328d 100644 --- a/docs/reference/mapping/types/histogram.asciidoc +++ b/docs/reference/mapping/types/histogram.asciidoc @@ -69,7 +69,15 @@ means the field can technically be aggregated with either algorithm, in practice index data in that manner (e.g. centroids for T-Digest or intervals for HDRHistogram) to ensure best accuracy. [[histogram-synthetic-source]] -==== Synthetic `_source` preview:[] +==== Synthetic `_source` + +IMPORTANT: Synthetic `_source` is Generally Available only for TSDB indices +(indices that have `index.mode` set to `time_series`). For other indices +synthetic `_source` is in technical preview. Features in technical preview may +be changed or removed in a future release. Elastic will apply best effort to fix +any issues, but features in technical preview are not subject to the support SLA +of official GA features. + `histogram` fields support <> in their default configuration. Synthetic `_source` cannot be used together with <> or <>. diff --git a/docs/reference/mapping/types/ip.asciidoc b/docs/reference/mapping/types/ip.asciidoc index 1a7f90954a96..25e18a0c60a6 100644 --- a/docs/reference/mapping/types/ip.asciidoc +++ b/docs/reference/mapping/types/ip.asciidoc @@ -152,7 +152,15 @@ GET my-index-000001/_search -------------------------------------------------- [[ip-synthetic-source]] -==== Synthetic `_source` preview:[] +==== Synthetic `_source` + +IMPORTANT: Synthetic `_source` is Generally Available only for TSDB indices +(indices that have `index.mode` set to `time_series`). For other indices +synthetic `_source` is in technical preview. Features in technical preview may +be changed or removed in a future release. Elastic will apply best effort to fix +any issues, but features in technical preview are not subject to the support SLA +of official GA features. + `ip` fields support <> in their default configuration. Synthetic `_source` cannot be used together with <> or with <> disabled. diff --git a/docs/reference/mapping/types/keyword.asciidoc b/docs/reference/mapping/types/keyword.asciidoc index c1bd5b49dfc9..a47e1d4549eb 100644 --- a/docs/reference/mapping/types/keyword.asciidoc +++ b/docs/reference/mapping/types/keyword.asciidoc @@ -170,7 +170,15 @@ Dimension fields have the following constraints: -- [[keyword-synthetic-source]] -==== Synthetic `_source` preview:[] +==== Synthetic `_source` + +IMPORTANT: Synthetic `_source` is Generally Available only for TSDB indices +(indices that have `index.mode` set to `time_series`). For other indices +synthetic `_source` is in technical preview. Features in technical preview may +be changed or removed in a future release. Elastic will apply best effort to fix +any issues, but features in technical preview are not subject to the support SLA +of official GA features. + `keyword` fields support <> in their default configuration. Synthetic `_source` cannot be used together with a <> or <>. diff --git a/docs/reference/mapping/types/numeric.asciidoc b/docs/reference/mapping/types/numeric.asciidoc index 2e9805d2f178..bee79746806b 100644 --- a/docs/reference/mapping/types/numeric.asciidoc +++ b/docs/reference/mapping/types/numeric.asciidoc @@ -228,7 +228,15 @@ numeric field can't be both a time series dimension and a time series metric. This parameter is required. 
[[numeric-synthetic-source]] -==== Synthetic `_source` preview:[] +==== Synthetic `_source` + +IMPORTANT: Synthetic `_source` is Generally Available only for TSDB indices +(indices that have `index.mode` set to `time_series`). For other indices +synthetic `_source` is in technical preview. Features in technical preview may +be changed or removed in a future release. Elastic will apply best effort to fix +any issues, but features in technical preview are not subject to the support SLA +of official GA features. + All numeric fields except `unsigned_long` support <> in their default configuration. Synthetic `_source` cannot be used together with <>, <>, or diff --git a/docs/reference/mapping/types/rank-features.asciidoc b/docs/reference/mapping/types/rank-features.asciidoc index 8be5e0018578..b54e99ede3fa 100644 --- a/docs/reference/mapping/types/rank-features.asciidoc +++ b/docs/reference/mapping/types/rank-features.asciidoc @@ -83,7 +83,10 @@ NOTE: `rank_features` fields only support single-valued features and strictly positive values. Multi-valued fields and zero or negative values will be rejected. NOTE: `rank_features` fields do not support sorting or aggregating and may -only be queried using <> queries. +only be queried using <> or <> queries. + +NOTE: <> queries on `rank_features` fields are scored by multiplying the matched +stored feature value by the provided `boost`. NOTE: `rank_features` fields only preserve 9 significant bits for the precision, which translates to a relative error of about 0.4%. diff --git a/docs/reference/mapping/types/text.asciidoc b/docs/reference/mapping/types/text.asciidoc index 730ca12c1a3e..acc9c962add9 100644 --- a/docs/reference/mapping/types/text.asciidoc +++ b/docs/reference/mapping/types/text.asciidoc @@ -160,7 +160,15 @@ The following parameters are accepted by `text` fields: Metadata about the field. [[text-synthetic-source]] -==== Synthetic `_source` preview:[] +==== Synthetic `_source` + +IMPORTANT: Synthetic `_source` is Generally Available only for TSDB indices +(indices that have `index.mode` set to `time_series`). For other indices +synthetic `_source` is in technical preview. Features in technical preview may +be changed or removed in a future release. Elastic will apply best effort to fix +any issues, but features in technical preview are not subject to the support SLA +of official GA features. + `text` fields support <> if they have a <> sub-field that supports synthetic `_source` or if the `text` field sets `store` to `true`. Either way, it may @@ -258,11 +266,11 @@ Will become: `text` fields are searchable by default, but by default are not available for aggregations, sorting, or scripting. If you try to sort, aggregate, or access -values from a script on a `text` field, you will see this exception: +values from a `text` field using a script, you'll see an exception indicating +that field data is disabled by default on text fields. To load field data in +memory, set `fielddata=true` on your field. -Fielddata is disabled on text fields by default. Set `fielddata=true` on -`your_field_name` in order to load fielddata in memory by uninverting the -inverted index. Note that this can however use significant memory. +NOTE: Loading field data in memory can consume significant memory. Field data is the only way to access the analyzed tokens from a full text field in aggregations, sorting, or scripting. 
For example, a full text field like `New York` diff --git a/docs/reference/mapping/types/version.asciidoc b/docs/reference/mapping/types/version.asciidoc index b042ea02da7c..b208127ae270 100644 --- a/docs/reference/mapping/types/version.asciidoc +++ b/docs/reference/mapping/types/version.asciidoc @@ -69,7 +69,15 @@ you strongly rely on these kind of queries. [[version-synthetic-source]] -==== Synthetic `_source` preview:[] +==== Synthetic `_source` + +IMPORTANT: Synthetic `_source` is Generally Available only for TSDB indices +(indices that have `index.mode` set to `time_series`). For other indices +synthetic `_source` is in technical preview. Features in technical preview may +be changed or removed in a future release. Elastic will apply best effort to fix +any issues, but features in technical preview are not subject to the support SLA +of official GA features. + `version` fields support <> so long as they don't declare <>. diff --git a/docs/reference/migration/index.asciidoc b/docs/reference/migration/index.asciidoc index 23f031426ab1..a523ea2d47f7 100644 --- a/docs/reference/migration/index.asciidoc +++ b/docs/reference/migration/index.asciidoc @@ -1,6 +1,7 @@ include::migration_intro.asciidoc[] * <> +* <> * <> * <> * <> @@ -9,6 +10,7 @@ include::migration_intro.asciidoc[] * <> include::migrate_8_7.asciidoc[] +include::migrate_8_6.asciidoc[] include::migrate_8_5.asciidoc[] include::migrate_8_4.asciidoc[] include::migrate_8_3.asciidoc[] diff --git a/docs/reference/migration/migrate_8_5.asciidoc b/docs/reference/migration/migrate_8_5.asciidoc index 00b3fbd091b9..4a91c68afb1d 100644 --- a/docs/reference/migration/migrate_8_5.asciidoc +++ b/docs/reference/migration/migrate_8_5.asciidoc @@ -9,9 +9,6 @@ your application to {es} 8.5. See also <> and <>. -coming::[8.5.0] - - [discrete] [[breaking-changes-8.5]] === Breaking changes diff --git a/docs/reference/migration/migrate_8_6.asciidoc b/docs/reference/migration/migrate_8_6.asciidoc index c266e1784c09..60d2af03cbf6 100644 --- a/docs/reference/migration/migrate_8_6.asciidoc +++ b/docs/reference/migration/migrate_8_6.asciidoc @@ -9,9 +9,6 @@ your application to {es} 8.6. See also <> and <>. -coming::[8.6.0] - - [discrete] [[breaking-changes-8.6]] === Breaking changes @@ -20,3 +17,79 @@ coming::[8.6.0] There are no breaking changes in {es} 8.6. // end::notable-breaking-changes[] + +[discrete] +[[deprecated-8.6]] +=== Deprecations + +The following functionality has been deprecated in {es} 8.6 +and will be removed in a future version. +While this won't have an immediate impact on your applications, +we strongly encourage you to take the described steps to update your code +after upgrading to 8.6. + +To find out if you are using any deprecated functionality, +enable <>. + + +[discrete] +[[deprecations_86_crud]] +==== CRUD deprecations + +[[deprecate_remove_binary_default_of_false_for_ingest_attachment_processor]] +.Deprecate 'remove_binary' default of false for ingest attachment processor +[%collapsible] +==== +*Details* + +The default "remove_binary" option for the attachment processor will be changed from false to true in a later Elasticsearch release. This means that the binary file sent to Elasticsearch will not be retained. + +*Impact* + +Users should update the "remove_binary" option to be explicitly true or false, instead of relying on the default value, so that no default value changes will affect Elasticsearch. 
+==== + +[discrete] +[[deprecations_86_cluster_and_node_setting]] +==== Cluster and node setting deprecations + +[[ensure_balance_threshold_at_least_1]] +.Ensure balance threshold is at least 1 +[%collapsible] +==== +*Details* + +Values for `cluster.routing.allocation.balance.threshold` smaller than `1` are now ignored. Support for values less than `1` for this setting is deprecated and will be forbidden in a future version. + +*Impact* + +Set `cluster.routing.allocation.balance.threshold` to be at least `1`. +==== + +[discrete] +[[deprecations_86_mapping]] +==== Mapping deprecations + +[[deprecate_silently_ignoring_type_fields_copy_to_boost_in_metadata_field_definition]] +.Deprecate silently ignoring type, fields, copy_to and boost in metadata field definition +[%collapsible] +==== +*Details* + +Unsupported parameters like type, fields, copy_to and boost are silently ignored when provided as part of the configuration of a metadata field in the index mappings. They will cause a deprecation warning when used in the mappings for indices that are created from 8.6 onwards. + +*Impact* + +To resolve the deprecation warning, remove the mention of type, fields, copy_to or boost from any metadata field definition as part of index mappings. They take no effect so removing them won't have any impact besides resolving the deprecation warning. +==== + +[discrete] +[[deprecations_86_rest_api]] +==== REST API deprecations + +[[state_field_deprecated_in_cluster_reroute_response]] +.state field is deprecated in /_cluster/reroute response +[%collapsible] +==== +*Details* + +`state` field is deprecated in `/_cluster/reroute` response. Cluster state does not provide meaningful information +about the result of reroute/commands execution. There are no guarantees that this exact state would be applied. + +*Impact* + +Reroute API users should not rely on `state` field and instead use `explain` to request result of commands execution. +==== + diff --git a/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc index a3a72bc39ab7..686632366315 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc @@ -135,6 +135,11 @@ reduced. If the bucket contains fewer samples than expected, the score is reduce `lower_confidence_bound`:::: (Optional, double) Lower bound of the 95% confidence interval. +`multimodal_distribution`:::: +(Optional, boolean) Indicates whether the bucket values' probability distribution has +several modes. When there are multiple modes, the typical value may not be the most +likely. + `multi_bucket_impact`:::: (Optional, integer) Impact of the deviation between actual and typical values in the past 12 buckets. diff --git a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc index dd9b2f16cb57..48893f1aadb8 100644 --- a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc @@ -25,8 +25,9 @@ Requires the `manage_ml` cluster privilege. This privilege is included in the [[ml-update-datafeed-desc]] == {api-description-title} -If you update a {dfeed} property, you must stop and start the {dfeed} for the -change to be applied. +You can only update a {dfeed} property while the {dfeed} is stopped. 
+However, it is possible to stop a {dfeed}, update one of its properties, +and restart it without closing the associated job. IMPORTANT: When {es} {security-features} are enabled, your {dfeed} remembers which roles the user who updated it had at the time of update and runs the query diff --git a/docs/reference/ml/anomaly-detection/functions/ml-count-functions.asciidoc b/docs/reference/ml/anomaly-detection/functions/ml-count-functions.asciidoc index 2ef14025986f..aef8e13bd429 100644 --- a/docs/reference/ml/anomaly-detection/functions/ml-count-functions.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/ml-count-functions.asciidoc @@ -26,11 +26,11 @@ The {ml-features} include the following count functions: The `count` function detects anomalies when the number of events in a bucket is anomalous. -The `high_count` function detects anomalies when the count of events in a -bucket are unusually high. +The `high_count` function detects anomalies when the count of events in a bucket +are unusually high. -The `low_count` function detects anomalies when the count of events in a -bucket are unusually low. +The `low_count` function detects anomalies when the count of events in a bucket +are unusually low. These functions support the following properties: @@ -111,8 +111,8 @@ PUT _ml/anomaly_detectors/example3 -------------------------------------------------- // TEST[skip:needs-licence] -In this example, the function detects when the count of events for a -status code is lower than usual. +In this example, the function detects when the count of events for a status code +is lower than usual. When you use this function in a detector in your {anomaly-job}, it models the event rate for each status code and detects when a status code has an unusually @@ -168,19 +168,19 @@ For more information about those properties, see the For example, if you have the following number of events per bucket: -======================================== +==== 1,22,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,43,31,0,0,0,0,0,0,0,0,0,0,0,0,2,1 -======================================== +==== The `non_zero_count` function models only the following data: -======================================== +==== 1,22,2,43,31,2,1 -======================================== +==== .Example 5: Analyzing signatures with the high_non_zero_count function [source,console] diff --git a/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc b/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc index 469f0bdb12b5..b9ae702d3ccb 100644 --- a/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc +++ b/docs/reference/ml/anomaly-detection/ml-configuring-aggregations.asciidoc @@ -34,6 +34,10 @@ to match. For example, if you use a `max` aggregation on a time field called descending order. Additional `composite` aggregation value sources are allowed, such as `terms`. +* The `size` parameter of the non-composite aggregations must match the +cardinality of your data. A greater value of the `size` parameter increases the +memory requirement of the aggregation. + * If you set the `summary_count_field_name` property to a non-null value, the {anomaly-job} expects to receive aggregated input. The property must be set to the name of the field that contains the count of raw data points that have been @@ -439,3 +443,85 @@ the `error` field. 
---------------------------------- // NOTCONSOLE + +[discrete] +[[aggs-amd-dfeeds]] +== Using `aggregate_metric_double` field type in {dfeeds} + + +NOTE: It is not currently possible to use `aggregate_metric_double` type fields +in {dfeeds} without aggregations. + +You can use fields with the +{ref}/aggregate-metric-double.html[`aggregate_metric_double`] field type in a +{dfeed} with aggregations. It is required to retrieve the `value_count` of the +`aggregate_metric_double` field in an aggregation and then use it as the +`summary_count_field_name` to provide the correct count that represents the +aggregation value. + +In the following example, `presum` is an `aggregate_metric_double` type field +that has all the possible metrics: `[ min, max, sum, value_count ]`. To use an +`avg` aggregation on this field, you need to perform a `value_count` aggregation +on `presum` and then set the field that contains the aggregated values +`my_count` as the `summary_count_field_name`: + + +[source,js] +---------------------------------- +{ + "analysis_config": { + "bucket_span": "1h", + "detectors": [ + { + "function": "avg", + "field_name": "my_avg" + } + ], + "summary_count_field_name": "my_count" <1> + }, + "data_description": { + "time_field": "timestamp" + }, + "datafeed_config": { + "indices": [ + "my_index" + ], + "datafeed_id": "datafeed-id", + "aggregations": { + "buckets": { + "date_histogram": { + "field": "time", + "fixed_interval": "360s", + "time_zone": "UTC" + }, + "aggregations": { + "timestamp": { + "max": {"field": "timestamp"} + }, + "my_avg": { <2> + "avg": { + "field": "presum" + } + }, + "my_count": { <3> + "value_count": { + "field": "presum" + } + } + } + } + } + } +} +---------------------------------- +// NOTCONSOLE + +<1> The field `my_count` is set as the `summary_count_field_name`. This field +contains aggregated values from the `presum` `aggregate_metric_double` type +field (refer to footnote 3). +<2> The `avg` aggregation to use on the `presum` `aggregate_metric_double` type +field. +<3> The `value_count` aggregation on the `presum` `aggregate_metric_double` type +field. This aggregated field must be set as the `summary_count_field_name` +(refer to footnote 1) to make it possible to use the `aggregate_metric_double` +type field in another aggregation. \ No newline at end of file diff --git a/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc b/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc index 7afaf88081b2..265b94d1d69d 100644 --- a/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc +++ b/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc @@ -30,38 +30,38 @@ ideal for this purpose. [[creating-ml-rules]] == Creating a rule -You can create {ml} rules in the {anomaly-job} wizard after you start the job, -from the job list, or under **{stack-manage-app} > {alerts-ui}**. - -On the *Create rule* window, give a name to the rule and optionally provide -tags. Specify the time interval for the rule to check detected anomalies or job -health changes. It is recommended to select an interval that is close to the -bucket span of the job. You can also select a notification option with the -_Notify_ selector. An alert remains active as long as the configured conditions -are met during the check interval. When there is no matching condition in the -next interval, the `Recovered` action group is invoked and the status of the -alert changes to `OK`.
For more details, refer to the documentation of -{kibana-ref}/create-and-manage-rules.html#defining-rules-general-details[general rule details]. - -Select the rule type you want to create under the {ml} section and continue to -configure it depending on whether it is an -<> or an -<> rule. +In *{stack-manage-app} > {rules-ui}*, you can create both types of {ml} rules: [role="screenshot"] -image::images/ml-rule.jpg["Creating a new machine learning rule"] +image::images/ml-rule.png["Creating a new machine learning rule",500] +// NOTE: This is an autogenerated screenshot. Do not edit it directly. +When you create a {ml} rule, you must provide a time interval for the rule to +check detected anomalies or job health changes. It is recommended to select an +interval that is close to the bucket span of the job. + +You must also select a notification option, which affects how often alerts +generate actions. Options include running actions at each check interval, only +when the alert status changes, or at a custom action interval. For more +information about these options, refer to the +{kibana-ref}/create-and-manage-rules.html#defining-rules-general-details[General rule details]. + +In the *{ml-app}* app, you can create only {anomaly-detect} alert rules; create +them from the {anomaly-job} wizard after you start the job or from the +{anomaly-job} list. [[creating-anomaly-alert-rules]] === {anomaly-detect-cap} alert -Select the job that the rule applies to. +When you create an {anomaly-detect} alert rule, you must select the job that +the rule applies to. -You must select a type of {ml} result. In particular, you can create rules based -on bucket, record, or influencer results. +You must also select a type of {ml} result. In particular, you can create rules +based on bucket, record, or influencer results. [role="screenshot"] -image::images/ml-anomaly-alert-severity.jpg["Selecting result type, severity, and test interval", 500] +image::images/ml-anomaly-alert-severity.png["Selecting result type, severity, and test interval", 500] +// NOTE: This is an autogenerated screenshot. Do not edit it directly. For each rule, you can configure the `anomaly_score` that triggers the action. The `anomaly_score` indicates the significance of a given anomaly compared to @@ -98,8 +98,9 @@ are met. [[creating-anomaly-jobs-health-rules]] === {anomaly-jobs-cap} health -Select the job or group that the rule applies to. If you assign more jobs to the -group, they are included the next time the rule conditions are checked. +When you create an {anomaly-jobs} health rule, you must select the job or group +that the rule applies to. If you assign more jobs to the group, they are +included the next time the rule conditions are checked. You can also use a special character (`*`) to apply the rule to all your jobs. Jobs created after the rule are automatically included. You can exclude jobs @@ -131,7 +132,8 @@ _Errors in job messages_:: that occur after the rule is created; it does not look at historic behavior. [role="screenshot"] -image::images/ml-health-check-config.jpg["Selecting health checkers"] +image::images/ml-health-check-config.png["Selecting health checkers",500] +// NOTE: This is an autogenerated screenshot. Do not edit it directly. As the last step in the rule creation process, <> that occur when the conditions @@ -141,43 +143,35 @@ are met. [[defining-actions]] == Defining actions -Connect your rule to actions that use supported built-in integrations by -selecting a connector type. 
Connectors are {kib} services or third-party -integrations that perform an action when the rule conditions are met or the -alert is recovered. You can select in which case the action will run. - -[role="screenshot"] -image::images/ml-anomaly-alert-actions.jpg["Selecting connector type"] - -For example, you can choose _Slack_ as a connector type and configure it to send -a message to a channel you selected. You can also create an index connector that -writes the JSON object you configure to a specific index. It's also possible to -customize the notification messages. A list of variables is available to include -in the message, like job ID, anomaly score, time, top influencers, {dfeed} ID, -memory status and so on based on the selected rule type. Refer to -<> to see the full list of available variables by rule type. +Your rule can use connectors, which are {kib} services or supported third-party +integrations that run actions when the rule conditions are met or when the +alert is recovered. For details about creating connectors, refer to +{kibana-ref}/action-types.html[Connectors]. +For example, you can use a Slack connector to send a message to a channel. Or +you can use an index connector that writes a JSON object to a specific index. +It's also possible to customize the notification messages. There is a set of +variables that you can include in the message depending on the rule type; refer +to <>. [role="screenshot"] -image::images/ml-anomaly-alert-messages.jpg["Customizing your message"] - -After you save the configurations, the rule appears in the *{alerts-ui}* list -where you can check its status and see the overview of its configuration -information. +image::images/ml-anomaly-alert-messages.png["Customizing your message",500] +// NOTE: This is an autogenerated screenshot. Do not edit it directly. -The name of an alert is always the same as the job ID of the associated -{anomaly-job} that triggered it. You can mute the notifications for a particular -{anomaly-job} on the page of the rule that lists the individual alerts. You can -open it via *{alerts-ui}* by selecting the rule name. +After you save the configurations, the rule appears in the +*{stack-manage-app} > {rules-ui}* list; you can check its status and see the +overview of its configuration information. +When an alert occurs, its name is always the same as the job ID of the associated +{anomaly-job} that triggered it. If necessary, you can snooze rules to prevent +them from generating actions. For more details, refer to +{kibana-ref}/create-and-manage-rules.html#controlling-rules[Snooze and disable rules]. [[action-variables]] == Action variables -You can add different variables to your action. The following variables are -specific to the {ml} rule types. An `*` marks the variables that can be used for -actions of recovered alerts. - +The following variables are specific to the {ml} rule types. An asterisk (`*`) +marks the variables that you can use in actions related to recovered alerts.
[[anomaly-alert-action-variables]] === {anomaly-detect-cap} alert action variables diff --git a/docs/reference/ml/images/ml-anomaly-alert-actions.jpg b/docs/reference/ml/images/ml-anomaly-alert-actions.jpg deleted file mode 100644 index a0b75152ca71..000000000000 Binary files a/docs/reference/ml/images/ml-anomaly-alert-actions.jpg and /dev/null differ diff --git a/docs/reference/ml/images/ml-anomaly-alert-messages.jpg b/docs/reference/ml/images/ml-anomaly-alert-messages.jpg deleted file mode 100644 index de5da557dd11..000000000000 Binary files a/docs/reference/ml/images/ml-anomaly-alert-messages.jpg and /dev/null differ diff --git a/docs/reference/ml/images/ml-anomaly-alert-messages.png b/docs/reference/ml/images/ml-anomaly-alert-messages.png new file mode 100644 index 000000000000..09216d566ab7 Binary files /dev/null and b/docs/reference/ml/images/ml-anomaly-alert-messages.png differ diff --git a/docs/reference/ml/images/ml-anomaly-alert-severity.jpg b/docs/reference/ml/images/ml-anomaly-alert-severity.jpg deleted file mode 100644 index dc6582ebbd84..000000000000 Binary files a/docs/reference/ml/images/ml-anomaly-alert-severity.jpg and /dev/null differ diff --git a/docs/reference/ml/images/ml-anomaly-alert-severity.png b/docs/reference/ml/images/ml-anomaly-alert-severity.png new file mode 100644 index 000000000000..9f15d464d2b6 Binary files /dev/null and b/docs/reference/ml/images/ml-anomaly-alert-severity.png differ diff --git a/docs/reference/ml/images/ml-health-check-config.jpg b/docs/reference/ml/images/ml-health-check-config.jpg deleted file mode 100644 index c235d7998452..000000000000 Binary files a/docs/reference/ml/images/ml-health-check-config.jpg and /dev/null differ diff --git a/docs/reference/ml/images/ml-health-check-config.png b/docs/reference/ml/images/ml-health-check-config.png new file mode 100644 index 000000000000..23a3833325bc Binary files /dev/null and b/docs/reference/ml/images/ml-health-check-config.png differ diff --git a/docs/reference/ml/images/ml-rule.jpg b/docs/reference/ml/images/ml-rule.jpg deleted file mode 100644 index 44973e785401..000000000000 Binary files a/docs/reference/ml/images/ml-rule.jpg and /dev/null differ diff --git a/docs/reference/ml/images/ml-rule.png b/docs/reference/ml/images/ml-rule.png new file mode 100644 index 000000000000..1008dadf2b66 Binary files /dev/null and b/docs/reference/ml/images/ml-rule.png differ diff --git a/docs/reference/ml/trained-models/apis/clear-trained-model-deployment-cache.asciidoc b/docs/reference/ml/trained-models/apis/clear-trained-model-deployment-cache.asciidoc index 79d63848e66a..f0977430c9b0 100644 --- a/docs/reference/ml/trained-models/apis/clear-trained-model-deployment-cache.asciidoc +++ b/docs/reference/ml/trained-models/apis/clear-trained-model-deployment-cache.asciidoc @@ -8,8 +8,6 @@ Clears a trained model deployment cache on all nodes where the trained model is assigned. -beta::[] - [[clear-trained-model-deployment-cache-request]] == {api-request-title} diff --git a/docs/reference/ml/trained-models/apis/infer-trained-model-deployment.asciidoc b/docs/reference/ml/trained-models/apis/infer-trained-model-deployment.asciidoc index 132d8e9de070..d92d74d894a3 100644 --- a/docs/reference/ml/trained-models/apis/infer-trained-model-deployment.asciidoc +++ b/docs/reference/ml/trained-models/apis/infer-trained-model-deployment.asciidoc @@ -46,8 +46,8 @@ Controls the amount of time to wait for {infer} results. Defaults to 10 seconds. 
`docs`:: (Required, array) An array of objects to pass to the model for inference. The objects should -contain a field matching your configured trained model input. Typically, the -field name is `text_field`. Currently, only a single value is allowed. +contain a field matching your configured trained model input. Typically, the +field name is `text_field`. //// [[infer-trained-model-deployment-results]] @@ -62,7 +62,7 @@ field name is `text_field`. Currently, only a single value is allowed. [[infer-trained-model-deployment-example]] == {api-examples-title} -The response depends on the task the model is trained for. If it is a text +The response depends on the task the model is trained for. If it is a text classification task, the response is the score. For example: [source,console] @@ -123,7 +123,7 @@ The API returns in this case: ---- // NOTCONSOLE -Zero-shot classification tasks require extra configuration defining the class +Zero-shot classification tasks require extra configuration defining the class labels. These labels are passed in the zero-shot inference config. [source,console] @@ -150,7 +150,7 @@ POST _ml/trained_models/model2/deployment/_infer -------------------------------------------------- // TEST[skip:TBD] -The API returns the predicted label and the confidence, as well as the top +The API returns the predicted label and the confidence, as well as the top classes: [source,console-result] @@ -205,7 +205,7 @@ POST _ml/trained_models/model2/deployment/_infer -------------------------------------------------- // TEST[skip:TBD] -When the input has been truncated due to the limit imposed by the model's +When the input has been truncated due to the limit imposed by the model's `max_sequence_length` the `is_truncated` field appears in the response. [source,console-result] diff --git a/docs/reference/ml/trained-models/apis/infer-trained-model.asciidoc b/docs/reference/ml/trained-models/apis/infer-trained-model.asciidoc index 9f64a4a0e10d..ca68a1c78010 100644 --- a/docs/reference/ml/trained-models/apis/infer-trained-model.asciidoc +++ b/docs/reference/ml/trained-models/apis/infer-trained-model.asciidoc @@ -6,14 +6,12 @@ Infer trained model ++++ -Evaluates a trained model. The model may be any supervised model either trained +Evaluates a trained model. The model may be any supervised model either trained by {dfanalytics} or imported. -NOTE: For model deployments with caching enabled, results may be returned +NOTE: For model deployments with caching enabled, results may be returned directly from the {infer} cache. -beta::[] - [[infer-trained-model-request]] == {api-request-title} @@ -51,9 +49,7 @@ Controls the amount of time to wait for {infer} results. Defaults to 10 seconds. (Required, array) An array of objects to pass to the model for inference. The objects should contain the fields matching your configured trained model input. Typically for -NLP models, the field name is `text_field`. Currently for NLP models, only a -single value is allowed. For {dfanalytics} or imported classification or -regression models, more than one value is allowed. +NLP models, the field name is `text_field`. //Begin inference_config `inference_config`:: @@ -106,7 +102,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-fill-mask] ===== `num_top_classes`:::: (Optional, integer) -Number of top predicted tokens to return for replacing the mask token. Defaults +Number of top predicted tokens to return for replacing the mask token. Defaults to `0`. 
`results_field`:::: @@ -277,7 +273,7 @@ The maximum amount of words in the answer. Defaults to `15`. `num_top_classes`:::: (Optional, integer) -The number the top found answers to return. Defaults to `0`, meaning only the +The number the top found answers to return. Defaults to `0`, meaning only the best found answer is returned. `question`:::: @@ -374,7 +370,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-classific `num_top_classes`:::: (Optional, integer) -Specifies the number of top class predictions to return. Defaults to all classes +Specifies the number of top class predictions to return. Defaults to all classes (-1). `results_field`:::: @@ -886,7 +882,7 @@ POST _ml/trained_models/model2/_infer -------------------------------------------------- // TEST[skip:TBD] -When the input has been truncated due to the limit imposed by the model's +When the input has been truncated due to the limit imposed by the model's `max_sequence_length` the `is_truncated` field appears in the response. [source,console-result] diff --git a/docs/reference/ml/trained-models/apis/put-trained-model-definition-part.asciidoc b/docs/reference/ml/trained-models/apis/put-trained-model-definition-part.asciidoc index 323f6953061a..53cad5cffa37 100644 --- a/docs/reference/ml/trained-models/apis/put-trained-model-definition-part.asciidoc +++ b/docs/reference/ml/trained-models/apis/put-trained-model-definition-part.asciidoc @@ -8,8 +8,6 @@ Creates part of a trained model definition. -beta::[] - [[ml-put-trained-model-definition-part-request]] == {api-request-title} diff --git a/docs/reference/ml/trained-models/apis/put-trained-model-vocabulary.asciidoc b/docs/reference/ml/trained-models/apis/put-trained-model-vocabulary.asciidoc index c252fcd8f7d5..154aad8350e0 100644 --- a/docs/reference/ml/trained-models/apis/put-trained-model-vocabulary.asciidoc +++ b/docs/reference/ml/trained-models/apis/put-trained-model-vocabulary.asciidoc @@ -9,8 +9,6 @@ Creates a trained model vocabulary. This is supported only for natural language processing (NLP) models. -beta::[] - [[ml-put-trained-model-vocabulary-request]] == {api-request-title} @@ -56,7 +54,7 @@ preference. Example: ["f o", "fo o"]. Must be provided for RoBERTa and BART styl The following example shows how to create a model vocabulary for a previously stored trained model configuration. -[source,js] +[source,console] -------------------------------------------------- PUT _ml/trained_models/elastic__distilbert-base-uncased-finetuned-conll03-english/vocabulary { @@ -67,7 +65,7 @@ PUT _ml/trained_models/elastic__distilbert-base-uncased-finetuned-conll03-englis ] } -------------------------------------------------- -// NOTCONSOLE +// TEST[s/\.\.\./"[PAD]"/ skip:TBD] The API returns the following results: diff --git a/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc b/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc index 104d6f3f9e76..b3c1c2ddc488 100644 --- a/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc +++ b/docs/reference/ml/trained-models/apis/start-trained-model-deployment.asciidoc @@ -8,8 +8,6 @@ Starts a new trained model deployment. -beta::[] - [[start-trained-model-deployment-request]] == {api-request-title} @@ -30,19 +28,19 @@ in an ingest pipeline or directly in the <> API. Scaling inference performance can be achieved by setting the parameters `number_of_allocations` and `threads_per_allocation`. 
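For example, the following request is only a sketch of how these two parameters might be combined; `my_model` is a placeholder model ID, not a model defined elsewhere in this document. It starts a deployment with two allocations of four threads each, so the deployment uses up to eight threads in total across the {ml} nodes it is assigned to.

[source,console]
--------------------------------------------------
POST _ml/trained_models/my_model/deployment/_start?number_of_allocations=2&threads_per_allocation=4
--------------------------------------------------
// TEST[skip:"my_model" is a placeholder model ID used only for illustration]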
-Increasing `threads_per_allocation` means more threads are used when an -inference request is processed on a node. This can improve inference speed for +Increasing `threads_per_allocation` means more threads are used when an +inference request is processed on a node. This can improve inference speed for certain models. It may also result in improvement to throughput. -Increasing `number_of_allocations` means more threads are used to process -multiple inference requests in parallel resulting in throughput improvement. -Each model allocation uses a number of threads defined by +Increasing `number_of_allocations` means more threads are used to process +multiple inference requests in parallel resulting in throughput improvement. +Each model allocation uses a number of threads defined by `threads_per_allocation`. -Model allocations are distributed across {ml} nodes. All allocations assigned to -a node share the same copy of the model in memory. To avoid thread -oversubscription which is detrimental to performance, model allocations are -distributed in such a way that the total number of used threads does not surpass +Model allocations are distributed across {ml} nodes. All allocations assigned to +a node share the same copy of the model in memory. To avoid thread +oversubscription which is detrimental to performance, model allocations are +distributed in such a way that the total number of used threads does not surpass the node's allocated processors. [[start-trained-model-deployment-path-params]] @@ -57,9 +55,9 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-id] `cache_size`:: (Optional, <>) -The inference cache size (in memory outside the JVM heap) per node for the -model. The default value is the size of the model as reported by the -`model_size_bytes` field in the <>. To disable the +The inference cache size (in memory outside the JVM heap) per node for the +model. The default value is the size of the model as reported by the +`model_size_bytes` field in the <>. To disable the cache, `0b` can be provided. `number_of_allocations`:: @@ -73,17 +71,17 @@ The priority of the deployment. The default value is `normal`. There are two priority settings: + -- -* `normal`: Use this for deployments in production. The deployment allocations +* `normal`: Use this for deployments in production. The deployment allocations are distributed so that node processors are not oversubscribed. -* `low`: Use this for testing model functionality. The intention is that these -deployments are not sent a high volume of input. The deployment is required to -have a single allocation with just one thread. Low priority deployments may be -assigned on nodes that already utilize all their processors but will be given a -lower CPU priority than normal deployments. Low priority deployments may be +* `low`: Use this for testing model functionality. The intention is that these +deployments are not sent a high volume of input. The deployment is required to +have a single allocation with just one thread. Low priority deployments may be +assigned on nodes that already utilize all their processors but will be given a +lower CPU priority than normal deployments. Low priority deployments may be unassigned in order to satisfy more allocations of normal priority deployments. -- -WARNING: Heavy usage of low priority deployments may impact performance of +WARNING: Heavy usage of low priority deployments may impact performance of normal priority deployments. `queue_capacity`:: @@ -91,20 +89,20 @@ normal priority deployments. 
Controls how many inference requests are allowed in the queue at a time. Every machine learning node in the cluster where the model can be allocated has a queue of this size; when the number of requests exceeds the total value, -new requests are rejected with a 429 error. Defaults to 1024. Max allowed value +new requests are rejected with a 429 error. Defaults to 1024. Max allowed value is 1000000. `threads_per_allocation`:: (Optional, integer) -Sets the number of threads used by each model allocation during inference. This -generally increases the speed per inference request. The inference process is a -compute-bound process; `threads_per_allocations` must not exceed the number of -available allocated processors per node. Defaults to 1. Must be a power of 2. +Sets the number of threads used by each model allocation during inference. This +generally increases the speed per inference request. The inference process is a +compute-bound process; `threads_per_allocations` must not exceed the number of +available allocated processors per node. Defaults to 1. Must be a power of 2. Max allowed value is 32. `timeout`:: (Optional, time) -Controls the amount of time to wait for the model to deploy. Defaults to 20 +Controls the amount of time to wait for the model to deploy. Defaults to 30 seconds. `wait_for`:: diff --git a/docs/reference/ml/trained-models/apis/stop-trained-model-deployment.asciidoc b/docs/reference/ml/trained-models/apis/stop-trained-model-deployment.asciidoc index 6fb2b5bfa3c2..d5de2b61cfe2 100644 --- a/docs/reference/ml/trained-models/apis/stop-trained-model-deployment.asciidoc +++ b/docs/reference/ml/trained-models/apis/stop-trained-model-deployment.asciidoc @@ -8,8 +8,6 @@ Stops a trained model deployment. -beta::[] - [[stop-trained-model-deployment-request]] == {api-request-title} diff --git a/docs/reference/modules/cluster/misc.asciidoc b/docs/reference/modules/cluster/misc.asciidoc index 83adaef9ec1a..024ca80bb5e3 100644 --- a/docs/reference/modules/cluster/misc.asciidoc +++ b/docs/reference/modules/cluster/misc.asciidoc @@ -1,8 +1,9 @@ [[misc-cluster-settings]] -==== Miscellaneous cluster settings +=== Miscellaneous cluster settings +[discrete] [[cluster-read-only]] -===== Metadata +==== Metadata An entire cluster may be set to read-only with the following setting: @@ -21,9 +22,9 @@ WARNING: Don't rely on this setting to prevent changes to your cluster. Any user with access to the <> API can make the cluster read-write again. - +[discrete] [[cluster-shard-limit]] -===== Cluster shard limit +==== Cluster shard limit There is a soft limit on the number of shards in a cluster, based on the number of nodes in the cluster. This is intended to prevent operations which may @@ -101,8 +102,9 @@ number of shards for each node, use the setting. -- +[discrete] [[user-defined-data]] -===== User-defined cluster metadata +==== User-defined cluster metadata User-defined metadata can be stored and retrieved using the Cluster Settings API. This can be used to store arbitrary, infrequently-changing data about the cluster @@ -127,8 +129,9 @@ metadata will be viewable by anyone with access to the <> API, and is recorded in the {es} logs. +[discrete] [[cluster-max-tombstones]] -===== Index tombstones +==== Index tombstones The cluster state maintains index tombstones to explicitly denote indices that have been deleted. 
The number of tombstones maintained in the cluster state is @@ -148,8 +151,9 @@ include::{es-repo-dir}/indices/dangling-indices-list.asciidoc[tag=dangling-index You can use the <> to manage this situation. +[discrete] [[cluster-logger]] -===== Logger +==== Logger The settings which control logging can be updated <> with the `logger.` prefix. For instance, to increase the logging level of the @@ -165,9 +169,9 @@ PUT /_cluster/settings } ------------------------------- - +[discrete] [[persistent-tasks-allocation]] -===== Persistent tasks allocation +==== Persistent tasks allocation Plugins can create a kind of tasks called persistent tasks. Those tasks are usually long-lived tasks and are stored in the cluster state, allowing the diff --git a/docs/reference/modules/cluster/shards_allocation.asciidoc b/docs/reference/modules/cluster/shards_allocation.asciidoc index b9fd5cb868dd..42282e00e81d 100644 --- a/docs/reference/modules/cluster/shards_allocation.asciidoc +++ b/docs/reference/modules/cluster/shards_allocation.asciidoc @@ -35,7 +35,7 @@ one of the active allocation ids in the cluster state. `cluster.routing.allocation.node_concurrent_recoveries`:: (<>) A shortcut to set both `cluster.routing.allocation.node_concurrent_incoming_recoveries` and - `cluster.routing.allocation.node_concurrent_outgoing_recoveries`. + `cluster.routing.allocation.node_concurrent_outgoing_recoveries`. Defaults to 2. `cluster.routing.allocation.node_initial_primaries_recoveries`:: @@ -57,17 +57,18 @@ one of the active allocation ids in the cluster state. [[shards-rebalancing-settings]] ==== Shard rebalancing settings -A cluster is _balanced_ when it has an equal number of shards on each node -without having a concentration of shards from any index on any node. {es} runs -an automatic process called _rebalancing_ which moves shards between the nodes -in your cluster to improve its balance. Rebalancing obeys all other shard -allocation rules such as <> and <> which may prevent it from -completely balancing the cluster. In that case, rebalancing strives to achieve -the most balanced cluster possible within the rules you have configured. If you -are using <> then {es} automatically applies allocation -filtering rules to place each shard within the appropriate tier. These rules -mean that the balancer works independently within each tier. +A cluster is _balanced_ when it has an equal number of shards on each node, with +all nodes needing equal resources, without having a concentration of shards from +any index on any node. {es} runs an automatic process called _rebalancing_ which +moves shards between the nodes in your cluster to improve its balance. +Rebalancing obeys all other shard allocation rules such as +<> and +<> which may prevent it from completely +balancing the cluster. In that case, rebalancing strives to achieve the most +balanced cluster possible within the rules you have configured. If you are using +<> then {es} automatically applies allocation filtering +rules to place each shard within the appropriate tier. These rules mean that the +balancer works independently within each tier. You can use the following settings to control the rebalancing of shards across the cluster: @@ -84,7 +85,6 @@ Enable or disable rebalancing for specific kinds of shards: * `none` - No shard balancing of any kind are allowed for any indices. 
-- - `cluster.routing.allocation.allow_rebalance`:: + -- @@ -98,13 +98,32 @@ Specify when shard rebalancing is allowed: -- `cluster.routing.allocation.cluster_concurrent_rebalance`:: - (<>) - Allow to control how many concurrent shard rebalances are - allowed cluster wide. Defaults to `2`. Note that this setting - only controls the number of concurrent shard relocations due - to imbalances in the cluster. This setting does not limit shard - relocations due to <> or <>. +(<>) +Defines the number of concurrent shard rebalances that are allowed across the whole +cluster. Defaults to `2`. Note that this setting only controls the number of +concurrent shard relocations due to imbalances in the cluster. This setting does +not limit shard relocations due to +<> or +<>. + +`cluster.routing.allocation.type`:: ++ +-- +Selects the algorithm used for computing the cluster balance. Defaults to +`desired_balance` which selects the _desired balance allocator_. This allocator +runs a background task which computes the desired balance of shards in the +cluster. Once this background task completes, {es} moves shards to their +desired locations. + +May also be set to `balanced` to select the legacy _balanced allocator_. This +allocator was the default allocator in versions of {es} before 8.6.0. It runs +in the foreground, preventing the master from doing other work in parallel. It +works by selecting a small number of shard movements which immediately improve +the balance of the cluster, and when those shard movements complete it runs +again and selects another few shards to move. Since this allocator makes its +decisions based only on the current state of the cluster, it will sometimes +move a shard several times while balancing the cluster. +-- [[shards-rebalancing-heuristics]] ==== Shard balancing heuristics settings @@ -114,28 +133,55 @@ of shards, and then moving shards between nodes to reduce the weight of the heavier nodes and increase the weight of the lighter ones. The cluster is balanced when there is no possible shard movement that can bring the weight of any node closer to the weight of any other node by more than a configurable -threshold. The following settings allow you to control the details of these -calculations. +threshold. + +The weight of a node depends on the number of shards it holds and on the total +estimated resource usage of those shards expressed in terms of the size of the +shard on disk and the number of threads needed to support write traffic to the +shard. {es} estimates the resource usage of shards belonging to data streams +when they are created by a rollover. The estimated disk size of the new shard +is the mean size of the other shards in the data stream. The estimated write +load of the new shard is a weighted average of the actual write loads of recent +shards in the data stream. Shards that do not belong to the write index of a +data stream have an estimated write load of zero. + +The following settings control how {es} combines these values into an overall +measure of each node's weight. `cluster.routing.allocation.balance.shard`:: - (<>) - Defines the weight factor for the total number of shards allocated on a node - (float). Defaults to `0.45f`. Raising this raises the tendency to - equalize the number of shards across all nodes in the cluster. +(float, <>) +Defines the weight factor for the total number of shards allocated to each node. +Defaults to `0.45f`.
Raising this value increases the tendency of {es} to +equalize the total number of shards across nodes ahead of the other balancing +variables. `cluster.routing.allocation.balance.index`:: - (<>) - Defines the weight factor for the number of shards per index allocated - on a specific node (float). Defaults to `0.55f`. Raising this raises the - tendency to equalize the number of shards per index across all nodes in - the cluster. +(float, <>) +Defines the weight factor for the number of shards per index allocated to each +node. Defaults to `0.55f`. Raising this value increases the tendency of {es} to +equalize the number of shards of each index across nodes ahead of the other +balancing variables. + +`cluster.routing.allocation.balance.disk_usage`:: +(float, <>) +Defines the weight factor for balancing shards according to their predicted disk +size in bytes. Defaults to `2e-11f`. Raising this value increases the tendency +of {es} to equalize the total disk usage across nodes ahead of the other +balancing variables. + +`cluster.routing.allocation.balance.write_load`:: +(float, <>) +Defines the weight factor for the write load of each shard, in terms of the +estimated number of indexing threads needed by the shard. Defaults to `10.0f`. +Raising this value increases the tendency of {es} to equalize the total write +load across nodes ahead of the other balancing variables. `cluster.routing.allocation.balance.threshold`:: - (<>) - Minimal optimization value of operations that should be performed (non - negative float). Defaults to `1.0f`. Raising this will cause the cluster - to be less aggressive about optimizing the shard balance. - +(float, <>) +The minimum improvement in weight which triggers a rebalancing shard movement. +Defaults to `1.0f`. Raising this value will cause {es} to stop rebalancing +shards sooner, leaving the cluster in a more unbalanced state. NOTE: Regardless of the result of the balancing algorithm, rebalancing might -not be allowed due to forced awareness or allocation filtering. +not be allowed due to allocation rules such as forced awareness and allocation +filtering. diff --git a/docs/reference/modules/discovery/discovery-settings.asciidoc b/docs/reference/modules/discovery/discovery-settings.asciidoc index c3410900896e..722b9382732f 100644 --- a/docs/reference/modules/discovery/discovery-settings.asciidoc +++ b/docs/reference/modules/discovery/discovery-settings.asciidoc @@ -201,7 +201,7 @@ Sets how long the master node waits for each cluster state update to be completely published to all nodes, unless `discovery.type` is set to `single-node`. The default value is `30s`. See <>. -`cluster.discovery_configuration_check.interval `:: +`cluster.discovery_configuration_check.interval`:: (<>) Sets the interval of some checks that will log warnings about an incorrect discovery configuration. The default value is `30s`. diff --git a/docs/reference/modules/discovery/fault-detection.asciidoc b/docs/reference/modules/discovery/fault-detection.asciidoc index f0cf3e4c18f0..66889e71d0ab 100644 --- a/docs/reference/modules/discovery/fault-detection.asciidoc +++ b/docs/reference/modules/discovery/fault-detection.asciidoc @@ -52,7 +52,7 @@ logs. * The master may appear busy due to frequent cluster state updates. To troubleshoot a cluster in this state, first ensure the cluster has a -<>. Next, focus on the nodes +<>. Next, focus on the nodes unexpectedly leaving the cluster ahead of all other issues. 
It will not be possible to solve other issues until the cluster has a stable master node and stable node membership. @@ -62,23 +62,33 @@ tools only offer a view of the state of the cluster at a single point in time. Instead, look at the cluster logs to see the pattern of behaviour over time. Focus particularly on logs from the elected master. When a node leaves the cluster, logs for the elected master include a message like this (with line -breaks added for clarity): +breaks added to make it easier to read): [source,text] ---- -[2022-03-21T11:02:35,513][INFO ][o.e.c.s.MasterService ] - [instance-0000000000] node-left[ - {instance-0000000004}{bfcMDTiDRkietFb9v_di7w}{aNlyORLASam1ammv2DzYXA}{172.27.47.21}{172.27.47.21:19054}{m} - reason: disconnected, - {tiebreaker-0000000003}{UNw_RuazQCSBskWZV8ID_w}{bltyVOQ-RNu20OQfTHSLtA}{172.27.161.154}{172.27.161.154:19251}{mv} - reason: disconnected - ], term: 14, version: 1653415, ... +[2022-03-21T11:02:35,513][INFO ][o.e.c.c.NodeLeftExecutor] [instance-0000000000] + node-left: [{instance-0000000004}{bfcMDTiDRkietFb9v_di7w}{aNlyORLASam1ammv2DzYXA}{172.27.47.21}{172.27.47.21:19054}{m}] + with reason [disconnected] ---- -This message says that the `MasterService` on the elected master -(`instance-0000000000`) is processing a `node-left` task. It lists the nodes -that are being removed and the reasons for their removal. Other nodes may log -similar messages, but report fewer details: +This message says that the `NodeLeftExecutor` on the elected master +(`instance-0000000000`) processed a `node-left` task, identifying the node that +was removed and the reason for its removal. When the node joins the cluster +again, logs for the elected master will include a message like this (with line +breaks added to make it easier to read): + +[source,text] +---- +[2022-03-21T11:02:59,892][INFO ][o.e.c.c.NodeJoinExecutor] [instance-0000000000] + node-join: [{instance-0000000004}{bfcMDTiDRkietFb9v_di7w}{UNw_RuazQCSBskWZV8ID_w}{172.27.47.21}{172.27.47.21:19054}{m}] + with reason [joining after restart, removed [24s] ago with reason [disconnected]] +---- + +This message says that the `NodeJoinExecutor` on the elected master +(`instance-0000000000`) processed a `node-join` task, identifying the node that +was added to the cluster and the reason for the task. + +Other nodes may log similar messages, but report fewer details: [source,text] ---- @@ -89,9 +99,10 @@ similar messages, but report fewer details: }, term: 14, version: 1653415, reason: Publication{term=14, version=1653415} ---- -Focus on the one from the `MasterService` which is only emitted on the elected -master, since it contains more details. If you don't see the messages from the -`MasterService`, check that: +These messages are not especially useful for troubleshooting, so focus on the +ones from the `NodeLeftExecutor` and `NodeJoinExecutor` which are only emitted +on the elected master and which contain more details. If you don't see the +messages from the `NodeLeftExecutor` and `NodeJoinExecutor`, check that: * You're looking at the logs for the elected master node. @@ -104,18 +115,14 @@ start or stop following the elected master. You can use these messages to determine each node's view of the state of the master over time. If a node restarts, it will leave the cluster and then join the cluster again. -When it rejoins, the `MasterService` will log that it is processing a -`node-join` task. 
You can tell from the master logs that the node was restarted -because the `node-join` message will indicate that it is -`joining after restart`. In older {es} versions, you can also determine that a -node restarted by looking at the second "ephemeral" ID in the `node-left` and -subsequent `node-join` messages. This ephemeral ID is different each time the -node starts up. If a node is unexpectedly restarting, you'll need to look at -the node's logs to see why it is shutting down. +When it rejoins, the `NodeJoinExecutor` will log that it processed a +`node-join` task indicating that the node is `joining after restart`. If a node +is unexpectedly restarting, look at the node's logs to see why it is shutting +down. If the node did not restart then you should look at the reason for its -departure in the `node-left` message, which is reported after each node. There -are three possible reasons: +departure more closely. Each reason has different troubleshooting steps, +described below. There are three possible reasons: * `disconnected`: The connection from the master node to the removed node was closed. @@ -134,6 +141,10 @@ control this mechanism. ===== Diagnosing `disconnected` nodes +Nodes typically leave the cluster with reason `disconnected` when they shut +down, but if they rejoin the cluster without restarting then there is some +other problem. + {es} is designed to run on a fairly reliable network. It opens a number of TCP connections between nodes and expects these connections to remain open forever. If a connection is closed then {es} will try and reconnect, so the occasional @@ -190,10 +201,32 @@ logger.org.elasticsearch.cluster.coordination.LagDetector: DEBUG When this logger is enabled, {es} will attempt to run the <> API on the faulty node and report the results in -the logs on the elected master. +the logs on the elected master. The results are compressed, encoded, and split +into chunks to avoid truncation: + +[source,text] +---- +[DEBUG][o.e.c.c.LagDetector ] [master] hot threads from node [{node}{g3cCUaMDQJmQ2ZLtjr-3dg}{10.0.0.1:9300}] lagging at version [183619] despite commit of cluster state version [183620] [part 1]: H4sIAAAAAAAA/x... +[DEBUG][o.e.c.c.LagDetector ] [master] hot threads from node [{node}{g3cCUaMDQJmQ2ZLtjr-3dg}{10.0.0.1:9300}] lagging at version [183619] despite commit of cluster state version [183620] [part 2]: p7x3w1hmOQVtuV... +[DEBUG][o.e.c.c.LagDetector ] [master] hot threads from node [{node}{g3cCUaMDQJmQ2ZLtjr-3dg}{10.0.0.1:9300}] lagging at version [183619] despite commit of cluster state version [183620] [part 3]: v7uTboMGDbyOy+... +[DEBUG][o.e.c.c.LagDetector ] [master] hot threads from node [{node}{g3cCUaMDQJmQ2ZLtjr-3dg}{10.0.0.1:9300}] lagging at version [183619] despite commit of cluster state version [183620] [part 4]: 4tse0RnPnLeDNN... +[DEBUG][o.e.c.c.LagDetector ] [master] hot threads from node [{node}{g3cCUaMDQJmQ2ZLtjr-3dg}{10.0.0.1:9300}] lagging at version [183619] despite commit of cluster state version [183620] (gzip compressed, base64-encoded, and split into 4 parts on preceding log lines) +---- + +To reconstruct the output, base64-decode the data and decompress it using +`gzip`. 
For instance, on Unix-like systems: + +[source,sh] +---- +cat lagdetector.log | sed -e 's/.*://' | base64 --decode | gzip --decompress +---- ===== Diagnosing `follower check retry count exceeded` nodes +Nodes sometimes leave the cluster with reason `follower check retry count +exceeded` when they shut down, but if they rejoin the cluster without +restarting then there is some other problem. + {es} needs every node to respond to network messages successfully and reasonably quickly. If a node rejects requests or does not respond at all then it can be harmful to the cluster. If enough consecutive checks fail then the @@ -340,3 +373,39 @@ checks are `transport_worker` and `cluster_coordination` threads, for which there should never be a long wait. There may also be evidence of long waits for threads in the {es} logs. Refer to <> for more information. + +===== Diagnosing `ShardLockObtainFailedException` failures + +If a node leaves and rejoins the cluster then {es} will usually shut down and +re-initialize its shards. If the shards do not shut down quickly enough then +{es} may fail to re-initialize them due to a `ShardLockObtainFailedException`. + +To gather more information about the reason for shards shutting down slowly, +configure the following logger: + +[source,yaml] +---- +logger.org.elasticsearch.env.NodeEnvironment: DEBUG +---- + +When this logger is enabled, {es} will attempt to run the +<> API whenever it encounters a +`ShardLockObtainFailedException`. The results are compressed, encoded, and +split into chunks to avoid truncation: + +[source,text] +---- +[DEBUG][o.e.e.NodeEnvironment ] [master] hot threads while failing to obtain shard lock for [index][0] [part 1]: H4sIAAAAAAAA/x... +[DEBUG][o.e.e.NodeEnvironment ] [master] hot threads while failing to obtain shard lock for [index][0] [part 2]: p7x3w1hmOQVtuV... +[DEBUG][o.e.e.NodeEnvironment ] [master] hot threads while failing to obtain shard lock for [index][0] [part 3]: v7uTboMGDbyOy+... +[DEBUG][o.e.e.NodeEnvironment ] [master] hot threads while failing to obtain shard lock for [index][0] [part 4]: 4tse0RnPnLeDNN... +[DEBUG][o.e.e.NodeEnvironment ] [master] hot threads while failing to obtain shard lock for [index][0] (gzip compressed, base64-encoded, and split into 4 parts on preceding log lines) +---- + +To reconstruct the output, base64-decode the data and decompress it using +`gzip`. For instance, on Unix-like systems: + +[source,sh] +---- +cat shardlock.log | sed -e 's/.*://' | base64 --decode | gzip --decompress +---- diff --git a/docs/reference/modules/indices/search-settings.asciidoc b/docs/reference/modules/indices/search-settings.asciidoc index 17fe9ce08b54..e43ec076578d 100644 --- a/docs/reference/modules/indices/search-settings.asciidoc +++ b/docs/reference/modules/indices/search-settings.asciidoc @@ -6,23 +6,24 @@ limits. [[indices-query-bool-max-clause-count]] `indices.query.bool.max_clause_count`:: +deprecated:[8.0.0] (<>, integer) -Maximum number of clauses a query can contain. Defaults to `4096`. +This deprecated setting has no effect. + -This setting limits the total number of clauses that a query tree can have. The default of 4096 -is quite high and should normally be sufficient. This limit applies to the rewritten query, so -not only `bool` queries can contribute high numbers of clauses, but also all queries that rewrite -to `bool` queries internally such as `fuzzy` queries. The limit is in place to prevent searches -from becoming too large, and taking up too much CPU and memory. 
In case you're considering -increasing this setting, make sure you've exhausted all other options to avoid having to do this. -Higher values can lead to performance degradations and memory issues, especially in clusters with -a high load or few resources. - -Elasticsearch offers some tools to avoid running into issues with regards to the maximum number of -clauses such as the <> query, which allows querying many distinct -values while still counting as a single clause, or the <> option -of <> fields, which allows executing prefix queries that expand to a high -number of terms as a single term query. +{es} will now dynamically set the maximum number of allowed clauses in a query, using +a heuristic based on the size of the search thread pool and the size of the heap allocated to +the JVM. This limit has a minimum value of 1024 and will in most cases be larger (for example, +a node with 30Gb RAM and 48 CPUs will have a maximum clause count of around 27,000). Larger +heaps lead to higher values, and larger thread pools result in lower values. ++ +Queries with many clauses should be avoided whenever possible. If you previously bumped this +setting to accommodate heavy queries, you might need to increase the amount of memory available +to {es}, or to reduce the size of your search thread pool so that more memory is +available to each concurrent search. ++ +In previous versions of Lucene you could get around this limit by nesting boolean queries +within each other, but the limit is now based on the total number of leaf queries within the +query as a whole and this workaround will no longer help. [[search-settings-max-buckets]] `search.max_buckets`:: diff --git a/docs/reference/monitoring/configuring-filebeat.asciidoc b/docs/reference/monitoring/configuring-filebeat.asciidoc index 4288071c1ba4..196a9f080f2d 100644 --- a/docs/reference/monitoring/configuring-filebeat.asciidoc +++ b/docs/reference/monitoring/configuring-filebeat.asciidoc @@ -26,14 +26,6 @@ from impacting the performance of your production cluster. See -- -. Enable the collection of monitoring data on your cluster. -+ --- -include::configuring-metricbeat.asciidoc[tag=enable-collection] - -For more information, see <> and <>. --- - . Identify which logs you want to monitor. + -- diff --git a/docs/reference/monitoring/configuring-metricbeat.asciidoc b/docs/reference/monitoring/configuring-metricbeat.asciidoc index 70c77c80bb34..3da47f584ee9 100644 --- a/docs/reference/monitoring/configuring-metricbeat.asciidoc +++ b/docs/reference/monitoring/configuring-metricbeat.asciidoc @@ -13,38 +13,6 @@ as described in <>. image::monitoring/images/metricbeat.png[Example monitoring architecture] -. Enable the collection of monitoring data. -+ --- -// tag::enable-collection[] -Set `xpack.monitoring.collection.enabled` to `true` on the -production cluster. By default, it is disabled (`false`). - -You can use the following APIs to review and change this setting: - -[source,console] ----------------------------------- -GET _cluster/settings ----------------------------------- - -[source,console] ----------------------------------- -PUT _cluster/settings -{ - "persistent": { - "xpack.monitoring.collection.enabled": true - } -} ----------------------------------- -// TEST[warning:[xpack.monitoring.collection.enabled] setting was deprecated in Elasticsearch and will be removed in a future release.] 
- -If {es} {security-features} are enabled, you must have `monitor` cluster privileges to -view the cluster settings and `manage` cluster privileges to change them. -// end::enable-collection[] - -For more information, see <> and <>. --- - . {metricbeat-ref}/metricbeat-installation-configuration.html[Install {metricbeat}]. Ideally install a single {metricbeat} instance configured with `scope: cluster` and configure `hosts` to point to an endpoint (e.g. a @@ -187,28 +155,4 @@ For more information about these configuration options, see . {metricbeat-ref}/metricbeat-starting.html[Start {metricbeat}] on each node. -. Disable the default collection of {es} monitoring metrics. -+ --- -Set `xpack.monitoring.elasticsearch.collection.enabled` to `false` on the -production cluster. - -You can use the following API to change this setting: - -[source,console] ----------------------------------- -PUT _cluster/settings -{ - "persistent": { - "xpack.monitoring.elasticsearch.collection.enabled": false - } -} ----------------------------------- -// TEST[warning:[xpack.monitoring.elasticsearch.collection.enabled] setting was deprecated in Elasticsearch and will be removed in a future release.] - -If {es} {security-features} are enabled, you must have `monitor` cluster -privileges to view the cluster settings and `manage` cluster privileges -to change them. --- - . {kibana-ref}/monitoring-data.html[View the monitoring data in {kib}]. diff --git a/docs/reference/query-dsl/script-score-query.asciidoc b/docs/reference/query-dsl/script-score-query.asciidoc index b2843b92309e..08522708554f 100644 --- a/docs/reference/query-dsl/script-score-query.asciidoc +++ b/docs/reference/query-dsl/script-score-query.asciidoc @@ -145,7 +145,6 @@ A good default choice might be to use the `_seq_no` field, whose only drawback is that scores will change if the document is updated since update operations also update the value of the `_seq_no` field. - [[decay-functions-numeric-fields]] ====== Decay functions for numeric fields You can read more about decay functions @@ -333,13 +332,13 @@ through a script: [[decay-functions]] ====== `decay` functions -The `script_score` query has equivalent <> -that can be used in script. +The `script_score` query has equivalent <> that can be used in scripts. include::{es-repo-dir}/vectors/vector-functions.asciidoc[] [[score-explanation]] -====== Explain request +===== Explain request Using an <> provides an explanation of how the parts of a score were computed. The `script_score` query can add its own explanation by setting the `explanation` parameter: [source,console] diff --git a/docs/reference/query-dsl/wrapper-query.asciidoc b/docs/reference/query-dsl/wrapper-query.asciidoc index b8b9626202e7..fe0d72684943 100644 --- a/docs/reference/query-dsl/wrapper-query.asciidoc +++ b/docs/reference/query-dsl/wrapper-query.asciidoc @@ -20,7 +20,4 @@ GET /_search <1> Base64 encoded string: `{"term" : { "user.id" : "kimchy" }}` -This query is more useful in the context of the Java high-level REST client or -transport client to also accept queries as json formatted string. -In these cases queries can be specified as a json or yaml formatted string or -as a query builder (which is a available in the Java high-level REST client). \ No newline at end of file +This query is more useful in the context of Spring Data Elasticsearch. It's the way a user can add custom queries when using Spring Data repositories. The user can add a @Query() annotation to a repository method. 
When such a method is called we do a parameter replacement in the query argument of the annotation and then send this as the query part of a search request. \ No newline at end of file diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 237d77d0a0c9..658a4f0a823f 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -1868,3 +1868,11 @@ For more information about reindexing from a remote cluster, refer to === Infer trained model deployment API See <>. + +[role="exclude",id="watching-meetup-data"] +=== Watching event data + +This example has been removed, as the Meetup.com streaming API has been +discontinued. + +Refer to <> for other Watcher examples. \ No newline at end of file diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index b2af3c1db205..5b557a23c32b 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -6,7 +6,12 @@ This section summarizes the changes in each release. + * <> +* <> +* <> +* <> +* <> * <> * <> * <> @@ -36,6 +41,10 @@ This section summarizes the changes in each release. -- include::release-notes/8.7.0.asciidoc[] +include::release-notes/8.6.1.asciidoc[] +include::release-notes/8.6.0.asciidoc[] +include::release-notes/8.5.3.asciidoc[] +include::release-notes/8.5.2.asciidoc[] include::release-notes/8.5.1.asciidoc[] include::release-notes/8.5.0.asciidoc[] include::release-notes/8.4.3.asciidoc[] diff --git a/docs/reference/release-notes/8.1.0.asciidoc b/docs/reference/release-notes/8.1.0.asciidoc index 8cce7d90c03e..c1c44871d641 100644 --- a/docs/reference/release-notes/8.1.0.asciidoc +++ b/docs/reference/release-notes/8.1.0.asciidoc @@ -143,6 +143,9 @@ CRUD:: Cluster Coordination:: * Remove last few mentions of Zen discovery {es-pull}80410[#80410] +Search:: +* Deprecate the `indices.query.bool.max_clause_count` node setting {es-pull}81525[#81525] (issue: {es-issue}46433[#46433]) + SQL:: * Deprecate `index_include_frozen` request parameter {es-pull}83943[#83943] (issue: {es-issue}81939[#81939]) diff --git a/docs/reference/release-notes/8.4.0.asciidoc b/docs/reference/release-notes/8.4.0.asciidoc index e83f9fb83472..daa6f623f771 100644 --- a/docs/reference/release-notes/8.4.0.asciidoc +++ b/docs/reference/release-notes/8.4.0.asciidoc @@ -3,6 +3,31 @@ Also see <>. +[[known-issues-8.4.0]] +[float] +=== Known issues + +// tag::ml-pre-7-datafeeds-known-issue[] +* {ml-cap} {dfeeds} cannot be listed if any are not modified since version 6.x ++ +If you have a {dfeed} that was created in version 5.x or 6.x and has not +been updated since 7.0, it is not possible to list {dfeeds} in +8.4 and 8.5. This means that {anomaly-jobs} cannot be managed using +{kib}. This issue is fixed in 8.6.0. ++ +If you upgrade to 8.4 or 8.5 with such a {dfeed}, you need to +work around the problem by updating each {dfeed}'s authorization information +using https://support.elastic.dev/knowledge/view/b5a879db[these steps]. +// end::ml-pre-7-datafeeds-known-issue[] + +// tag::file-based-settings-deadlock-known-issue[] +* Orchestrators which use a `settings.json` file to configure Elasticsearch may +encounter deadlocks during master elections (issue: {es-issue}92812[#92812]) ++ +To resolve the deadlock, remove the `settings.json` file and restart the +affected node. 
+// end::file-based-settings-deadlock-known-issue[] + [[bug-8.4.0]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.4.1.asciidoc b/docs/reference/release-notes/8.4.1.asciidoc index c0ce679621fb..10f6f60010c3 100644 --- a/docs/reference/release-notes/8.4.1.asciidoc +++ b/docs/reference/release-notes/8.4.1.asciidoc @@ -3,14 +3,6 @@ Also see <>. -[[bug-8.4.1]] -[float] -=== Bug fixes - -Machine Learning:: -* [ML] Validate trained model deployment `queue_capacity` limit {es-pull}89611[#89611] (issue: {es-issue}89555[#89555]) - - [[known-issues-8.4.1]] [float] === Known issues @@ -18,3 +10,14 @@ Machine Learning:: * When using date range search with format that does not have all date fields (missing month or day) an incorrectly parsed date could be used. The workaround is to use date pattern with all date fields (year, month, day) (issue: {es-issue}90187[#90187]) + +include::8.4.0.asciidoc[tag=ml-pre-7-datafeeds-known-issue] + +include::8.4.0.asciidoc[tag=file-based-settings-deadlock-known-issue] + +[[bug-8.4.1]] +[float] +=== Bug fixes + +Machine Learning:: +* [ML] Validate trained model deployment `queue_capacity` limit {es-pull}89611[#89611] (issue: {es-issue}89555[#89555]) diff --git a/docs/reference/release-notes/8.4.2.asciidoc b/docs/reference/release-notes/8.4.2.asciidoc index 2f6481876bd1..605c7d136179 100644 --- a/docs/reference/release-notes/8.4.2.asciidoc +++ b/docs/reference/release-notes/8.4.2.asciidoc @@ -20,6 +20,9 @@ This regression was fixed in version 8.4.3. an incorrectly parsed date could be used. The workaround is to use date pattern with all date fields (year, month, day) (issue: {es-issue}90187[#90187]) +include::8.4.0.asciidoc[tag=ml-pre-7-datafeeds-known-issue] + +include::8.4.0.asciidoc[tag=file-based-settings-deadlock-known-issue] [[bug-8.4.2]] [float] diff --git a/docs/reference/release-notes/8.4.3.asciidoc b/docs/reference/release-notes/8.4.3.asciidoc index e16c5006e7ff..7038091ab6be 100644 --- a/docs/reference/release-notes/8.4.3.asciidoc +++ b/docs/reference/release-notes/8.4.3.asciidoc @@ -3,6 +3,18 @@ Also see <>. +[[known-issues-8.4.3]] +[float] +=== Known issues + +* When using date range search with format that does not have all date fields (missing month or day) +an incorrectly parsed date could be used. The workaround is to use date pattern with all date fields (year, month, day) +(issue: {es-issue}90187[#90187]) + +include::8.4.0.asciidoc[tag=ml-pre-7-datafeeds-known-issue] + +include::8.4.0.asciidoc[tag=file-based-settings-deadlock-known-issue] + [[bug-8.4.3]] [float] === Bug fixes @@ -19,12 +31,3 @@ Ingest Node:: Ranking:: * Ensure `cross_fields` always uses valid term statistics {es-pull}90314[#90314] - - -[[known-issues-8.4.3]] -[float] -=== Known issues - -* When using date range search with format that does not have all date fields (missing month or day) -an incorrectly parsed date could be used. The workaround is to use date pattern with all date fields (year, month, day) -(issue: {es-issue}90187[#90187]) diff --git a/docs/reference/release-notes/8.5.0.asciidoc b/docs/reference/release-notes/8.5.0.asciidoc index 1dbb44ca5f1d..50df64084340 100644 --- a/docs/reference/release-notes/8.5.0.asciidoc +++ b/docs/reference/release-notes/8.5.0.asciidoc @@ -3,6 +3,21 @@ Also see <>. +[[known-issues-8.5.0]] +[float] +=== Known issues + +* It is possible to inadvertently create an alias with the same name as an +index in version 8.5.0. 
This action leaves the cluster in an invalid state in +which several features will not work correctly, and it may not even be possible +to restart nodes while in this state. Upgrade to 8.5.1 as soon as possible to +avoid the risk of this occurring ({es-pull}91456[#91456]). If your cluster is +affected by this issue, upgrade to 8.5.3 to repair it ({es-pull}91887[#91887]). + +include::8.4.0.asciidoc[tag=ml-pre-7-datafeeds-known-issue] + +include::8.4.0.asciidoc[tag=file-based-settings-deadlock-known-issue] + [[breaking-8.5.0]] [float] === Breaking changes @@ -319,12 +334,3 @@ Client:: Packaging:: * Upgrade bundled JDK to Java 19 {es-pull}90571[#90571] - - -[[known-issues-8.5.0]] -[float] -=== Known issues - -* When using date range search with format that does not have all date fields (missing month or day) -an incorrectly parsed date could be used. The workaround is to use date pattern with all date fields (year, month, day) -(issue: {es-issue}90187[#90187]) diff --git a/docs/reference/release-notes/8.5.1.asciidoc b/docs/reference/release-notes/8.5.1.asciidoc index 4bc3b4c54e41..9b326c760f7e 100644 --- a/docs/reference/release-notes/8.5.1.asciidoc +++ b/docs/reference/release-notes/8.5.1.asciidoc @@ -4,6 +4,14 @@ Also see <>. +[[known-issues-8.5.1]] +[float] +=== Known issues + +include::8.4.0.asciidoc[tag=ml-pre-7-datafeeds-known-issue] + +include::8.4.0.asciidoc[tag=file-based-settings-deadlock-known-issue] + [[bug-8.5.1]] [float] === Bug fixes diff --git a/docs/reference/release-notes/8.5.2.asciidoc b/docs/reference/release-notes/8.5.2.asciidoc new file mode 100644 index 000000000000..147d7b9df890 --- /dev/null +++ b/docs/reference/release-notes/8.5.2.asciidoc @@ -0,0 +1,47 @@ +[[release-notes-8.5.2]] +== {es} version 8.5.2 + + +Also see <>. + +[[known-issues-8.5.2]] +[float] +=== Known issues + +include::8.4.0.asciidoc[tag=ml-pre-7-datafeeds-known-issue] + +include::8.4.0.asciidoc[tag=file-based-settings-deadlock-known-issue] + +[[bug-8.5.2]] +[float] +=== Bug fixes + +Authorization:: +* Avoid potential unsupported operation exception in doc bitset cache {es-pull}91490[#91490] + +EQL:: +* Refine bwc version checks on `EqlSearchRequest` {es-pull}91510[#91510] + +Health:: +* SLM uneahlthy policies diagnosis recommends correct URL in action {es-pull}91506[#91506] + +Ingest Node:: +* Refactor `DatabaseNodeService` as a cluster state listener {es-pull}91567[#91567] (issue: {es-issue}86999[#86999]) + +Stats:: +* Fix NPE in IndexService getNodeMappingStats {es-pull}91334[#91334] (issue: {es-issue}91259[#91259]) + +Transform:: +* Fix failure when resolving indices from CCS {es-pull}91622[#91622] (issue: {es-issue}91550[#91550]) + +[[enhancement-8.5.2]] +[float] +=== Enhancements + +EQL:: +* Remove version limitations for CCS {es-pull}91409[#91409] + +Ingest Node:: +* Refactor enrich maintenance coordination logic {es-pull}90931[#90931] + + diff --git a/docs/reference/release-notes/8.5.3.asciidoc b/docs/reference/release-notes/8.5.3.asciidoc new file mode 100644 index 000000000000..3e19cce9254b --- /dev/null +++ b/docs/reference/release-notes/8.5.3.asciidoc @@ -0,0 +1,51 @@ +[[release-notes-8.5.3]] +== {es} version 8.5.3 + +Also see <>. 
+ +[[known-issues-8.5.3]] +[float] +=== Known issues + +include::8.4.0.asciidoc[tag=ml-pre-7-datafeeds-known-issue] + +include::8.4.0.asciidoc[tag=file-based-settings-deadlock-known-issue] + +[[bug-8.5.3]] +[float] +=== Bug fixes + +Infra/Core:: +* Add `trace.id` to request trace logs {es-pull}91772[#91772] (issue: {es-issue}88174[#88174]) +* `DoPrivileged` in `ElasticsearchEncaughtExceptionHandler` and check modify thread {es-pull}91704[#91704] (issue: {es-issue}91650[#91650]) + +Ingest Node:: +* Handle any exception thrown while generating source for an `IngestDocument` {es-pull}91981[#91981] + +Machine Learning:: +* ML stats failures should not stop the usage API working {es-pull}91917[#91917] (issue: {es-issue}91893[#91893]) + +Stats:: +* Fix NPE in IndexService getNodeMappingStats {es-pull}91334[#91334] (issue: {es-issue}91259[#91259]) + +Transform:: +* Fix failure when resolving indices from CCS {es-pull}91622[#91622] (issue: {es-issue}91550[#91550]) + +[[enhancement-8.5.3]] +[float] +=== Enhancements + +Ingest Node:: +* Refactor enrich maintenance coordination logic {es-pull}90931[#90931] + +TLS:: +* Support SAN/dnsName for restricted trust {es-pull}91946[#91946] + +[[upgrade-8.5.3]] +[float] +=== Upgrades + +Engine:: +* Upgrade Lucene to version 9.4.2 {es-pull}91823[#91823] + + diff --git a/docs/reference/release-notes/8.6.0.asciidoc b/docs/reference/release-notes/8.6.0.asciidoc index dd1bfd4cd268..b268cd63534b 100644 --- a/docs/reference/release-notes/8.6.0.asciidoc +++ b/docs/reference/release-notes/8.6.0.asciidoc @@ -1,8 +1,275 @@ [[release-notes-8.6.0]] == {es} version 8.6.0 -coming[8.6.0] - Also see <>. +[[known-issues-8.6.0]] +[float] +=== Known issues + +include::8.4.0.asciidoc[tag=file-based-settings-deadlock-known-issue] + +[[bug-8.6.0]] +[float] +=== Bug fixes + +Aggregations:: +* GeoBoundsAggregations reject sub aggregations {es-pull}91073[#91073] (issue: {es-issue}91072[#91072]) + +Allocation:: +* Avoid NPE when disassociateDeadNodes is executed for a node present in the desired balance {es-pull}91659[#91659] +* Check `NodesShutdownMetadata` type before assuming restart {es-pull}90807[#90807] + +Authentication:: +* Fix time unit for connection request timeout of JWKs reload {es-pull}92080[#92080] +* Improve performance for role mapping with DNs {es-pull}92074[#92074] +* Improve robustness of `JwkSet` reloading {es-pull}92081[#92081] +* Support stored authentication headers prior to version 6.7 {es-pull}92221[#92221] + +Authorization:: +* Make adding auth info to REST responses more robust {es-pull}92168[#92168] +* Security remove datemath special handling {es-pull}91047[#91047] + +Cluster Coordination:: +* Fix `TransportMasterNodeAction` holding a CS reference needlessly {es-pull}90724[#90724] (issue: {es-issue}89220[#89220]) +* Include last-committed data in publication {es-pull}92259[#92259] (issue: {es-issue}90158[#90158]) +* Unsafe bootstrap memory optimization {es-pull}92493[#92493] + +EQL:: +* EQL sequences: support join on multi-values {es-pull}89965[#89965] + +Graph:: +* Fix potential issue with graph api's timed out field in response {es-pull}91006[#91006] + +Health:: +* Don't account for the unassigned reason when diagnosing NO_VALID_SHARD_COPY {es-pull}92416[#92416] +* Fix NPE when evaluating the disk health for non-data nodes {es-pull}92643[#92643] +* Use https in the short URLs for the `shards_availability` indicator {es-pull}92310[#92310] + +Indices APIs:: +* Trigger index settings providers when updating component templates {es-pull}91615[#91615] (issue: 
{es-issue}91592[#91592]) + +Infra/Core:: +* Check reserved state in Metadata.isGlobalStateEquals {es-pull}92124[#92124] +* Datastream unavailable exception metadata {es-pull}91461[#91461] +* Fix `BytesRefArray` on append empty `BytesRef` {es-pull}91364[#91364] +* Fix index expression options for requests with a single name or pattern {es-pull}91231[#91231] +* Force init of Unbox in log4j {es-pull}92377[#92377] (issue: {es-issue}91964[#91964]) +* In file based settings, wait until security index is ready for role mappings {es-pull}92173[#92173] (issue: {es-issue}91939[#91939]) +* Index expression exclusions never trigger "not found" {es-pull}90902[#90902] +* Update error states from inside the main state executor {es-pull}90346[#90346] (issue: {es-issue}90337[#90337]) + +Infra/Scripting:: +* Fix compile with hex literals ending with d/f {es-pull}91501[#91501] (issue: {es-issue}88614[#88614]) + +Ingest Node:: +* Fixing a race condition in `EnrichCoordinatorProxyAction` that can leave an item stuck in its queue {es-pull}90688[#90688] (issue: {es-issue}90598[#90598]) + +Machine Learning:: +* Copy more settings when creating DF analytics destination index {es-pull}91546[#91546] (issue: {es-issue}89795[#89795]) +* Fix for 'No statistics' error message {ml-pull}2410[#2410] +* Fix for 'No counts available' error message {ml-pull}2414[#2414] +* Guard against input sequences that are too long for Question Answering models {es-pull}91924[#91924] +* Improve performance of closing files before spawning {ml-pull}2424[#2424] +* Skip remote clusters when performing up front privileges validation for datafeeds {es-pull}91895[#91895] (issue: {es-issue}87832[#87832]) +* Support fields with commas in data frame analytics `analyzed_fields` {es-pull}91710[#91710] (issue: {es-issue}72541[#72541]) +* Validate rule filters are present on open anomaly detection api {es-pull}92207[#92207] + + +Mapping:: +* Consolidate field name validation when parsing mappings and documents {es-pull}91328[#91328] +* Fix handling empty key case in the terms aggregation {es-pull}90822[#90822] + +Monitoring:: +* Fix logstash loadavg (xpack cases) {es-pull}90494[#90494] +* [Stack Monitoring] Update ES module mappings {es-pull}90649[#90649] + +Network:: +* Clean up on exception while chunking XContent {es-pull}92024[#92024] +* Fix Chunked APIs sending incorrect responses to HEAD requests {es-pull}92042[#92042] (issue: {es-issue}92032[#92032]) +* Reject connection attempts while closing {es-pull}92465[#92465] + +SQL:: +* Fix NPE on logging when not tracking total hits {es-pull}92425[#92425] + +Search:: +* Allow different decay values depending on the score function {es-pull}91195[#91195] (issue: {es-issue}78887[#78887]) +* Fix timing bug with DFS profiling {es-pull}92421[#92421] + +Snapshot/Restore:: +* Simplify and optimize deduplication of `RepositoryData` for a non-caching repository instance {es-pull}91851[#91851] (issue: {es-issue}89952[#89952]) + +Store:: +* Fix numOpenOutputs and modCount in ByteSizeCachingDirectory {es-pull}92440[#92440] (issue: {es-issue}92434[#92434]) + +Transform:: +* Skip remote clusters when performing up front privileges validation {es-pull}91788[#91788] + +Vector Search:: +* Make `knn` search requests fully cancellable {es-pull}90612[#90612] + +[[deprecation-8.6.0]] +[float] +=== Deprecations + +Allocation:: +* Deprecate state field in /_cluster/reroute response {es-pull}90399[#90399] +* Ensure balance threshold is at least 1 {es-pull}92100[#92100] + +Ingest Node:: +* Deprecate 'remove_binary' default of 
false for ingest attachment processor {es-pull}90460[#90460] + +Mapping:: +* Deprecate silently ignoring type, fields, copy_to and boost in metadata field definition {es-pull}90989[#90989] (issue: {es-issue}35389[#35389]) + +[[enhancement-8.6.0]] +[float] +=== Enhancements + +Allocation:: +* Clear up forecasted write load and shard size from previous write index during rollovers {es-pull}91590[#91590] +* Forecast average shard size during rollovers {es-pull}91561[#91561] +* Forecast write load during rollovers {es-pull}91425[#91425] +* Improve shard balancing {es-pull}91603[#91603] +* Introduce desired-balance allocator {es-pull}91343[#91343] +* Limit shard realocation retries {es-pull}90296[#90296] +* Prevalidate node removal API {es-pull}88952[#88952] +* Set default `cluster.routing.allocation.balance.disk_usage` {es-pull}91951[#91951] +* Store write load in the `IndexMetadata` during data streams rollovers {es-pull}91019[#91019] +* Update the default `cluster.routing.allocation.balance.disk_usage` {es-pull}92065[#92065] +* `DesiredBalance:` expose it via _internal/desired_balance {es-pull}91038[#91038] (issue: {es-issue}90583[#90583]) + +Authorization:: +* [Fleet] Added logs-elastic_agent* read privileges to `kibana_system` {es-pull}91701[#91701] + +CRUD:: +* Keep track of average shard write load {es-pull}90768[#90768] (issue: {es-issue}90102[#90102]) + +Geo:: +* Centroid aggregation for cartesian points and shapes {es-pull}89216[#89216] (issue: {es-issue}90156[#90156]) +* Improve H3#hexRing logic and add H3#areNeighborCells method {es-pull}91140[#91140] +* Move SpatialUtils to geo library {es-pull}88088[#88088] (issue: {es-issue}86607[#86607]) +* Reduce number of object allocations in H3#geoToH3 and speed up computations {es-pull}91492[#91492] +* Support `cartesian_bounds` aggregation on point and shape {es-pull}91298[#91298] (issue: {es-issue}90157[#90157]) + +ILM+SLM:: +* ILM: Get policy support wildcard name {es-pull}89238[#89238] + +Infra/Core:: +* Handle APM global labels as affix setting {es-pull}91438[#91438] (issue: {es-issue}91278[#91278]) +* Improve date math exclusions in expressions {es-pull}90298[#90298] +* Introduce a phase to use String.equals on constant strings, rather than def equality {es-pull}91362[#91362] (issue: {es-issue}91235[#91235]) +* More actionable error for ancient indices {es-pull}91243[#91243] +* Operator/index templates {es-pull}90143[#90143] +* Operator/ingest {es-pull}89735[#89735] +* Transport threads and `_hot_threads` {es-pull}90482[#90482] (issue: {es-issue}90334[#90334]) +* Upgrade XContent to Jackson 2.14.0 and enable Fast Double Parser {es-pull}90553[#90553] + +Infra/Plugins:: +* Create placeholder plugin when loading stable plugins {es-pull}90870[#90870] +* Example stable plugin {es-pull}90805[#90805] +* Make `extendedPlugins,` `HasNativeController` and `moduleName` optional in plugin descriptor {es-pull}90835[#90835] +* Rename `NamedComponent` name parameter to value {es-pull}91306[#91306] + +Infra/Scripting:: +* Use an explicit null check for null receivers in painless, rather than an NPE {es-pull}91347[#91347] (issue: {es-issue}91236[#91236]) + +Machine Learning:: +* Add a filter parameter to frequent items {es-pull}91137[#91137] +* Add a regex to the output of the `categorize_text` aggregation {es-pull}90723[#90723] +* Add ability to filter and sort buckets by `change_point` numeric values {es-pull}91299[#91299] +* Add api to update trained model deployment `number_of_allocations` {es-pull}90728[#90728] +* Alias timestamp to @timestamp in 
anomaly detection results index {es-pull}90812[#90812] +* Allow `model_aliases` to be used with Pytorch trained models {es-pull}91296[#91296] +* Allow overriding timestamp field to null in file structure finder {es-pull}90764[#90764] +* Audit a message every day the datafeed has seen no data {es-pull}91774[#91774] +* Low priority trained model deployments {es-pull}91234[#91234] (issue: {es-issue}91024[#91024]) +* Provide additional information about anomaly score factors {es-pull}90675[#90675] + +Mapping:: +* Don't create IndexCaps objects when recording unmapped fields {es-pull}90806[#90806] (issue: {es-issue}90796[#90796]) +* aggregate metric double add a max min validation {es-pull}90381[#90381] + +Recovery:: +* Remove resize index settings once shards are started {es-pull}90391[#90391] (issue: {es-issue}90127[#90127]) + +Rollup:: +* Test downsample runtime fields and security {es-pull}90593[#90593] + +Search:: +* Add LimitedOffsetsEnum to Limited offset token {es-pull}86110[#86110] (issue: {es-issue}86109[#86109]) +* Add profiling and documentation for dfs phase {es-pull}90536[#90536] (issue: {es-issue}89713[#89713]) +* Bulk merge field-caps responses using mapping hash {es-pull}86323[#86323] +* Enhance nested depth tracking when parsing queries {es-pull}90425[#90425] +* Expose telemetry about search usage {es-pull}91528[#91528] +* Return docs when using nested mappings in archive indices {es-pull}90585[#90585] (issue: {es-issue}90523[#90523]) +* Use `IndexOrDocValues` query for IP range queries {es-pull}90303[#90303] (issue: {es-issue}83658[#83658]) + +Snapshot/Restore:: +* Increase snaphot pool max size to 10 {es-pull}90282[#90282] (issue: {es-issue}89608[#89608]) +* Tie snapshot speed to node bandwidth settings {es-pull}91021[#91021] (issue: {es-issue}57023[#57023]) + +Store:: +* Allow plugins to wrap Lucene directories created by the `IndexModule` {es-pull}91556[#91556] + +TLS:: +* Add certificate start/expiry dates to SSL Diagnostic message {es-pull}89461[#89461] + +TSDB:: +* Generate 'index.routing_path' from dynamic mapping templates {es-pull}90552[#90552] (issue: {es-issue}90528[#90528]) +* Support malformed numbers in synthetic `_source` {es-pull}90428[#90428] +* Support synthetic `_source` for `_doc_count` field {es-pull}91465[#91465] +* Synthetic _source: support `field` in many cases {es-pull}89950[#89950] +* Synthetic `_source`: `ignore_malformed` for `ip` {es-pull}90038[#90038] +* Synthetic `_source`: support `wildcard` field {es-pull}90196[#90196] + +Transform:: +* Add a health section to transform stats {es-pull}90760[#90760] +* Support `aggregate_metric_double` field type in transform aggregations {es-pull}91045[#91045] + +Vector Search:: +* Add profiling information for knn vector queries {es-pull}90200[#90200] + +[[feature-8.6.0]] +[float] +=== New features + +Distributed:: +* Add "index" and "search" node roles with feature flag and setting {es-pull}90993[#90993] + +EQL:: +* EQL samples {es-pull}91312[#91312] + +Health:: +* Use chunked encoding for `RestGetHealthAction` {es-pull}91515[#91515] (issue: {es-issue}90223[#90223]) +* [HealthAPI] Use the `RestCancellableNodeClient` infrastructure {es-pull}91587[#91587] + +Machine Learning:: +* Make `categorize_text` aggregation GA {es-pull}88600[#88600] + +Vector Search:: +* Add fielddata and scripting support for byte-sized vectors {es-pull}91184[#91184] +* Add support for indexing byte-sized knn vectors {es-pull}90774[#90774] + +[[regression-8.6.0]] +[float] +=== Regressions + +Infra/Core:: +* Revert "Remove 
`ImmutableOpenMap` from snapshot services" {es-pull}90287[#90287] + +[[upgrade-8.6.0]] +[float] +=== Upgrades + +Infra/Logging:: +* Upgrade to log4j 2.19.0 {es-pull}90589[#90589] (issue: {es-issue}90584[#90584]) + +Network:: +* Upgrade to Netty 4.1.82.Final {es-pull}90604[#90604] +* Upgrade to Netty 4.1.84 {es-pull}91271[#91271] + +Snapshot/Restore:: +* Upgrade GCS SDK to 2.13.1 {es-pull}92327[#92327] + diff --git a/docs/reference/release-notes/8.6.1.asciidoc b/docs/reference/release-notes/8.6.1.asciidoc new file mode 100644 index 000000000000..a02142a5ee6a --- /dev/null +++ b/docs/reference/release-notes/8.6.1.asciidoc @@ -0,0 +1,33 @@ +[[release-notes-8.6.1]] +== {es} version 8.6.1 + +Also see <>. + +[[bug-8.6.1]] +[float] +=== Bug fixes + +Data streams:: +* Fix wildcard expansion for delete-by-query on data streams {es-pull}92891[#92891] +* Fix wildcard expansion for update-by-query on data streams {es-pull}92717[#92717] (issue: {es-issue}90272[#90272]) + +Distributed:: +* Fix `ByteArrayIndexInput` with nonzero offset {es-pull}93205[#93205] + +ILM+SLM:: +* Get repository metadata from the cluster state doesn't throw an exception if a repo is missing {es-pull}92914[#92914] + +Infra/Core:: +* Don't announce ready until file settings are applied {es-pull}92856[#92856] (issue: {es-issue}92812[#92812]) + +Machine Learning:: +* Utilise parallel allocations where the inference request contains multiple documents {es-pull}92359[#92359] + +Mapping:: +* Fix `_bulk` api `dynamic_templates` and explicit `op_type` {es-pull}92687[#92687] + +Search:: +* Avoid doing I/O when fetching min and max for keyword fields {es-pull}92026[#92026] +* Reduce memory required for search responses when many shards are unavailable {es-pull}91365[#91365] (issue: {es-issue}90622[#90622]) + + diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc index c514f86e4725..c0a06628631d 100644 --- a/docs/reference/release-notes/highlights.asciidoc +++ b/docs/reference/release-notes/highlights.asciidoc @@ -12,8 +12,8 @@ endif::[] // Add previous release to the list Other versions: -//{ref-bare}/8.6/release-highlights.html[8.6] -{ref-bare}/8.5/release-highlights.html[8.5] +{ref-bare}/8.6/release-highlights.html[8.6] +| {ref-bare}/8.5/release-highlights.html[8.5] | {ref-bare}/8.4/release-highlights.html[8.4] | {ref-bare}/8.3/release-highlights.html[8.3] | {ref-bare}/8.2/release-highlights.html[8.2] diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index 39e8d52824bc..ca2d55c54333 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -126,17 +126,17 @@ tag::cluster-health-status[] Health status of the cluster, based on the state of its primary and replica shards. Statuses are: - `green`::: - All shards are assigned. + * `green`: + All shards are assigned. - `yellow`::: - All primary shards are assigned, but one or more replica shards are - unassigned. If a node in the cluster fails, some - data could be unavailable until that node is repaired. + * `yellow`: + All primary shards are assigned, but one or more replica shards are + unassigned. If a node in the cluster fails, some data could be unavailable + until that node is repaired. - `red`::: - One or more primary shards are unassigned, so some data is unavailable. This - can occur briefly during cluster startup as primary shards are assigned. 
+ * `red`: + One or more primary shards are unassigned, so some data is unavailable. This + can occur briefly during cluster startup as primary shards are assigned. end::cluster-health-status[] tag::committed[] @@ -713,7 +713,10 @@ end::payloads[] tag::pipeline[] `pipeline`:: -(Optional, string) ID of the pipeline to use to preprocess incoming documents. +(Optional, string) ID of the pipeline to use to preprocess incoming documents. If the index has a +default ingest pipeline specified, then setting the value to `_none` disables the default ingest +pipeline for this request. If a final pipeline is configured it will always run, regardless of the +value of this parameter. end::pipeline[] tag::pages-processed[] diff --git a/docs/reference/rest-api/usage.asciidoc b/docs/reference/rest-api/usage.asciidoc index 13773b02fe41..8ae0ed5de5fd 100644 --- a/docs/reference/rest-api/usage.asciidoc +++ b/docs/reference/rest-api/usage.asciidoc @@ -400,6 +400,13 @@ GET /_xpack/usage "available" : true, "enabled" : true, "indices_count" : 0 + }, + "health_api" : { + "available" : true, + "enabled" : true, + "invocations": { + "total": 0 + } } } ------------------------------------------------------------ @@ -408,6 +415,7 @@ GET /_xpack/usage // TESTRESPONSE[s/"eql" : \{[^\}]*\},/"eql" : $body.$_path,/] // TESTRESPONSE[s/"policy_stats" : \[[^\]]*\]/"policy_stats" : $body.$_path/] // TESTRESPONSE[s/"slm" : \{[^\}]*\},/"slm" : $body.$_path,/] +// TESTRESPONSE[s/"health_api" : \{[^\}]*\}\s*\}/"health_api" : $body.$_path/] // TESTRESPONSE[s/ : true/ : $body.$_path/] // TESTRESPONSE[s/ : false/ : $body.$_path/] // TESTRESPONSE[s/ : (\-)?[0-9]+/ : $body.$_path/] @@ -419,6 +427,7 @@ GET /_xpack/usage // 2. Handling eql, which is disabled by default on release builds and enabled // everywhere else during the initial implementation phase until its release // 3. Ignore the contents of the `slm` object because it might contain policies -// 4. All of the numbers and strings on the right hand side of *every* field in +// 4. Ignore the contents of the `health_api` object because we do not know if the api has been called or not +// 5. All of the numbers and strings on the right hand side of *every* field in // the response are ignored. So we're really only asserting things about the // the shape of this response, not the values in it. diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index 6da95f694728..3a147e6ad9bc 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -19,7 +19,6 @@ exception of the <>. 
* <> * <> * <> -* <> [discrete] [[search-testing-apis]] @@ -53,8 +52,6 @@ include::search/point-in-time-api.asciidoc[] include::search/knn-search.asciidoc[] -include::search/semantic-search.asciidoc[] - include::search/scroll-api.asciidoc[] include::search/clear-scroll-api.asciidoc[] diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc index 30a554ec0b60..7369f5956ec4 100644 --- a/docs/reference/search/profile.asciidoc +++ b/docs/reference/search/profile.asciidoc @@ -97,7 +97,9 @@ The API returns the following result: "create_weight": 4694895, "shallow_advance": 0, "create_weight_count": 1, - "build_scorer": 7112295 + "build_scorer": 7112295, + "count_weight": 0, + "count_weight_count": 0 }, "children": [ { @@ -122,7 +124,9 @@ The API returns the following result: "create_weight": 2382719, "shallow_advance": 9754, "create_weight_count": 1, - "build_scorer": 1355007 + "build_scorer": 1355007, + "count_weight": 0, + "count_weight_count": 0 } }, { @@ -147,7 +151,9 @@ The API returns the following result: "create_weight": 130951, "shallow_advance": 2512, "create_weight_count": 1, - "build_scorer": 46153 + "build_scorer": 46153, + "count_weight": 0, + "count_weight_count": 0 } } ] @@ -386,7 +392,9 @@ Lucene execution: "create_weight": 4694895, "shallow_advance": 0, "create_weight_count": 1, - "build_scorer": 7112295 + "build_scorer": 7112295, + "count_weight": 0, + "count_weight_count": 0 } -------------------------------------------------- // TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.$_path",\n"searches": [{\n"query": [{\n"type": "BooleanQuery",\n"description": "message:get message:search",\n"time_in_nanos": $body.$_path,/] @@ -653,10 +661,12 @@ The API returns the following result: "compute_max_score": 0, "advance": 3942, "advance_count": 4, + "count_weight_count": 0, "score": 0, "build_scorer_count": 2, "create_weight": 38380, "shallow_advance": 0, + "count_weight": 0, "create_weight_count": 1, "build_scorer": 99296 } @@ -679,9 +689,11 @@ The API returns the following result: "advance": 3552, "advance_count": 1, "score": 5027, + "count_weight_count": 0, "build_scorer_count": 2, "create_weight": 107840, "shallow_advance": 0, + "count_weight": 0, "create_weight_count": 1, "build_scorer": 44215 } @@ -967,7 +979,7 @@ overall time, the breakdown is inclusive of all children times. ===== Profiling Fetch All shards that fetched documents will have a `fetch` section in the profile. 
-Let's execute a small search and have a look a the fetch profile: +Let's execute a small search and have a look at the fetch profile: [source,console] ---- @@ -1236,7 +1248,8 @@ One of the `dfs.knn` sections for a shard looks like the following: [source,js] -------------------------------------------------- "dfs" : { - "knn" : { + "knn" : [ + { "query" : [ { "type" : "DocAndScoreQuery", @@ -1260,7 +1273,9 @@ One of the `dfs.knn` sections for a shard looks like the following: "create_weight" : 128879, "shallow_advance" : 0, "create_weight_count" : 1, - "build_scorer" : 307595 + "build_scorer" : 307595, + "count_weight": 0, + "count_weight_count": 0 } } ], @@ -1272,7 +1287,7 @@ One of the `dfs.knn` sections for a shard looks like the following: "time_in_nanos" : 17163 } ] - } + } ] } -------------------------------------------------- // TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.$_path",\n/] diff --git a/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc b/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc index 8206ec794eb0..4ca8acb1853b 100644 --- a/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc +++ b/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc @@ -1,16 +1,17 @@ -[cols="^,^,^,^,^,^,^,^,^,^,^"] +[cols="^,^,^,^,^,^,^,^,^,^,^,^"] |==== -| 10+^h| Remote cluster version +| 11+^h| Remote cluster version h| Local cluster version - | 6.8 | 7.1–7.16 | 7.17 | 8.0 | 8.1 | 8.2 | 8.3 | 8.4 | 8.5 |8.6 -| 6.8 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} -| 7.1–7.16 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} -| 7.17 | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} -| 8.0 | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} -| 8.1 | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} -| 8.2 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} -| 8.3 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon}|{yes-icon} | {yes-icon}| {yes-icon} -| 8.4 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} |{yes-icon}| {yes-icon}| {yes-icon} -| 8.5 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} |{yes-icon}| {yes-icon}| {yes-icon} -| 8.6 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {yes-icon}| {yes-icon} + | 6.8 | 7.1–7.16 | 7.17 | 8.0 | 8.1 | 8.2 | 8.3 | 8.4 | 8.5 |8.6 |8.7 +| 6.8 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} +| 7.1–7.16 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} +| 7.17 | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} +| 8.0 | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| 
{yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} +| 8.1 | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} +| 8.2 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} +| 8.3 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon}|{yes-icon} | {yes-icon}| {yes-icon} | {yes-icon} +| 8.4 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} |{yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} +| 8.5 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} |{yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} +| 8.6 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {yes-icon}| {yes-icon} | {yes-icon} +| 8.7 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {yes-icon} | {yes-icon} |==== \ No newline at end of file diff --git a/docs/reference/search/search-your-data/highlighting.asciidoc b/docs/reference/search/search-your-data/highlighting.asciidoc index 291ede6fcb44..8c2fa89bb341 100644 --- a/docs/reference/search/search-your-data/highlighting.asciidoc +++ b/docs/reference/search/search-your-data/highlighting.asciidoc @@ -948,7 +948,7 @@ highlighter is to find the best text fragments for the query, and highlight the query terms in the found fragments. For this, a highlighter needs to address several questions: -- How break a text into fragments? +- How to break a text into fragments? - How to find the best fragments among all fragments? - How to highlight the query terms in a fragment? diff --git a/docs/reference/search/search-your-data/knn-search.asciidoc b/docs/reference/search/search-your-data/knn-search.asciidoc index 6789ac4b9954..26ce7a12c115 100644 --- a/docs/reference/search/search-your-data/knn-search.asciidoc +++ b/docs/reference/search/search-your-data/knn-search.asciidoc @@ -151,7 +151,7 @@ page cache for it to be efficient. Please consult the configuration and sizing. To run an approximate kNN search, use the <> -to search a `dense_vector` field with indexing enabled. +to search one or more `dense_vector` fields with indexing enabled. . Explicitly map one or more `dense_vector` fields. Approximate kNN search requires the following mapping options: @@ -176,6 +176,12 @@ PUT image-index "index": true, "similarity": "l2_norm" }, + "title-vector": { + "type": "dense_vector", + "dims": 5, + "index": true, + "similarity": "l2_norm" + }, "title": { "type": "text" }, @@ -194,11 +200,11 @@ PUT image-index ---- POST image-index/_bulk?refresh=true { "index": { "_id": "1" } } -{ "image-vector": [1, 5, -20], "title": "moose family", "file-type": "jpg" } +{ "image-vector": [1, 5, -20], "title-vector": [12, 50, -10, 0, 1], "title": "moose family", "file-type": "jpg" } { "index": { "_id": "2" } } -{ "image-vector": [42, 8, -15], "title": "alpine lake", "file-type": "png" } +{ "image-vector": [42, 8, -15], "title-vector": [25, 1, 4, -12, 2], "title": "alpine lake", "file-type": "png" } { "index": { "_id": "3" } } -{ "image-vector": [15, 11, 23], "title": "full moon", "file-type": "jpg" } +{ "image-vector": [15, 11, 23], "title-vector": [1, 5, 25, 50, 20], "title": "full moon", "file-type": "jpg" } ... ---- //TEST[continued] @@ -406,6 +412,54 @@ over all documents that match the search. So for approximate kNN search, aggrega nearest documents. 
If the search also includes a `query`, then aggregations are calculated on the combined set of `knn` and `query` matches. +[discrete] +==== Search multiple kNN fields + +In addition to 'hybrid retrieval', you can search more than one kNN vector field at a time: + +[source,console] +---- +POST image-index/_search +{ + "query": { + "match": { + "title": { + "query": "mountain lake", + "boost": 0.9 + } + } + }, + "knn": [ { + "field": "image-vector", + "query_vector": [54, 10, -2], + "k": 5, + "num_candidates": 50, + "boost": 0.1 + }, + { + "field": "title-vector", + "query_vector": [1, 20, -52, 23, 10], + "k": 10, + "num_candidates": 10, + "boost": 0.5 + }], + "size": 10 +} +---- +// TEST[continued] + +This search finds the global top `k = 5` vector matches for `image-vector` and the global `k = 10` for the `title-vector`. +These top values are then combined with the matches from the `match` query and the top-10 documents are returned. +The multiple `knn` entries and the `query` matches are combined through a disjunction, +as if you took a boolean 'or' between them. The top `k` vector results represent the global nearest neighbors across +all index shards. + +The scoring for a doc with the above configured boosts would be: + +``` +score = 0.9 * match_score + 0.1 * knn_score_image-vector + 0.5 * knn_score_title-vector +``` + [discrete] [[knn-indexing-considerations]] ==== Indexing considerations @@ -415,7 +469,7 @@ segment as an https://arxiv.org/abs/1603.09320[HNSW graph]. Indexing vectors for approximate kNN search can take substantial time because of how expensive it is to build these graphs. You may need to increase the client request timeout for index and bulk requests. The <> -contains important guidance around indexing performance, and how the the index +contains important guidance around indexing performance, and how the index configuration can affect search performance. In addition to its search-time tuning parameters, the HNSW algorithm has diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc index 67bf05b36f13..63eebb225e0e 100644 --- a/docs/reference/search/search.asciidoc +++ b/docs/reference/search/search.asciidoc @@ -483,14 +483,14 @@ A boost value greater than `1.0` increases the score. A boost value between experimental::[] [[search-api-knn]] `knn`:: -(Optional, object) +(Optional, object or array of objects) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn] + .Properties of `knn` object [%collapsible%open] ==== `field`:: -(Required, string) +(Required, string) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-field] `filter`:: @@ -498,16 +498,22 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-field] include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-filter] `k`:: -(Required, integer) +(Required, integer) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-k] `num_candidates`:: -(Required, integer) +(Required, integer) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-num-candidates] `query_vector`:: -(Required, array of floats) +(Optional, array of floats) include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-query-vector] + +`query_vector_builder`:: +(Optional, object) +A configuration object indicating how to build a query_vector before executing the request. You must provide +a `query_vector_builder` or `query_vector`, but not both. 
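+
+As a rough sketch of how the two options differ, the request below builds the query vector from a text embedding model at search time instead of passing `query_vector` directly. The index name, field name, and model ID are placeholders, and the `text_embedding`/`model_text` keys are an assumption about the builder's shape rather than something spelled out in this change:
+
+[source,console]
+----
+POST my-index/_search
+{
+  "knn": {
+    "field": "text_embedding",
+    "k": 10,
+    "num_candidates": 100,
+    "query_vector_builder": {
+      "text_embedding": {
+        "model_id": "my-text-embedding-model",
+        "model_text": "A picture of a snow capped mountain"
+      }
+    }
+  }
+}
+----
+// TEST[skip:TBD]
+
+Because the two parameters are mutually exclusive, choose `query_vector` when the vector is computed client-side and a builder when it should be derived from the query text at search time.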
+ ==== [[search-api-min-score]] diff --git a/docs/reference/search/semantic-search.asciidoc b/docs/reference/search/semantic-search.asciidoc deleted file mode 100644 index 95a142d6d2a6..000000000000 --- a/docs/reference/search/semantic-search.asciidoc +++ /dev/null @@ -1,167 +0,0 @@ -[[semantic-search-api]] -=== Semantic search API -++++ -Semantic search -++++ - -experimental::[] -Semantic search uses a text embedding NLP model to generate a dense vector from the input query string. -The resulting dense vector is then used in a <> against an index containing dense vectors -created with the same text embedding model. The search results are semantically similar as learned -by the model. - -[source,console] ----- -GET my-index/_semantic_search -{ - "query_string": "A picture of a snow capped mountain", - "model_id": "my-text-embedding-model", - "knn": { - "field": "text_embedding", - "k": 10, - "num_candidates": 100 - } -} ----- -// TEST[skip:TBD] - - -[[semantic-search-api-request]] -==== {api-request-title} - -`GET /_semantic_search` - -`POST /_semantic_search` - -[[semantic-search-api-prereqs]] -==== {api-prereq-title} - -* If the {es} {security-features} are enabled, you must have the `read` -<> for the target data stream, index, -or alias. - -[[semantic-search-api-desc]] -==== {api-description-title} - -The semantic search API uses a text embedding model to create a dense vector -representation of the query string. - - -[[semantic-search-api-path-params]] -==== {api-path-parms-title} - -``:: -(Optional, string) Comma-separated list of data streams, indices, and aliases -to search. Supports wildcards (`*`). To search all data streams and indices, -use `*` or `_all`. - -[role="child_attributes"] -[[semantic-search-api-query-params]] -==== {api-query-parms-title} - -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=routing] - -[role="child_attributes"] -[[semantic-search-api-request-body]] -==== {api-request-body-title} - -`model_id`:: -(Required, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-id] - -`query_string`:: -(Required, string) The input text to embed. - -`knn`:: -(Required, object) -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn] -+ -.Properties of `knn` object -[%collapsible%open] -==== -`field`:: -(Required, string) -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-field] - -`filter`:: -(Optional, <>) -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-filter] - -`k`:: -(Required, integer) -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-k] - -`num_candidates`:: -(Required, integer) -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=knn-num-candidates] -==== - -`query`:: -(Optional, <>) Defines the search definition using the -<>. - -`text_embedding_config`:: -(Object, optional) Override certain setting of the text embedding model's -configuration. 
-+ -.Properties of text_embedding inference -[%collapsible%open] -===== -`results_field`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field] - -`tokenization`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization] -+ -.Properties of tokenization -[%collapsible%open] -====== -`bert`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert] -+ -.Properties of bert -[%collapsible%open] -======= -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] -======= -`roberta`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta] -+ -.Properties of roberta -[%collapsible%open] -======= -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] -======= -`mpnet`:::: -(Optional, object) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet] -+ -.Properties of mpnet -[%collapsible%open] -======= -`truncate`:::: -(Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate] -======= -====== -===== - -include::{es-repo-dir}/search/search.asciidoc[tag=docvalue-fields-def] -include::{es-repo-dir}/search/search.asciidoc[tag=fields-param-def] -include::{es-repo-dir}/search/search.asciidoc[tag=source-filtering-def] -include::{es-repo-dir}/search/search.asciidoc[tag=stored-fields-def] - -[role="child_attributes"] -[[semantic-search-api-response-body]] -==== {api-response-body-title} - -The semantic search response has the same structure as a kNN search response. - diff --git a/docs/reference/search/suggesters/context-suggest.asciidoc b/docs/reference/search/suggesters/context-suggest.asciidoc index 5baad38868ca..a925e532e50f 100644 --- a/docs/reference/search/suggesters/context-suggest.asciidoc +++ b/docs/reference/search/suggesters/context-suggest.asciidoc @@ -380,3 +380,9 @@ multiple context clauses. The following parameters are supported for a precision value can be a distance value (`5m`, `10km` etc.) or a raw geohash precision (`1`..`12`). Defaults to generating neighbours for index time precision level. + +NOTE: The precision field does not result in a distance match. +Specifying a distance value like `10km` only results in a geohash precision value that represents tiles of that size. +The precision will be used to encode the search geo point into a geohash tile for completion matching. +A consequence of this is that points outside that tile, even if very close to the search point, will not be matched. +Reducing the precision, or increasing the distance, can reduce the risk of this happening, but not entirely remove it. diff --git a/docs/reference/searchable-snapshots/index.asciidoc b/docs/reference/searchable-snapshots/index.asciidoc index 5f883535c49a..4a56961246c2 100644 --- a/docs/reference/searchable-snapshots/index.asciidoc +++ b/docs/reference/searchable-snapshots/index.asciidoc @@ -6,11 +6,11 @@ infrequently accessed and read-only data in a very cost-effective fashion. The <> and <> data tiers use {search-snaps} to reduce your storage and operating costs. -{search-snaps-cap} eliminate the need for <> -after rolling over from the hot tier, potentially halving the local storage needed to search -your data. 
{search-snaps-cap} rely on the same snapshot mechanism you already -use for backups and have minimal impact on your snapshot repository storage -costs. +{search-snaps-cap} eliminate the need for <> after +rolling over from the hot tier, potentially halving the local storage needed to +search your data. {search-snaps-cap} rely on the same snapshot mechanism you +already use for backups and have minimal impact on your snapshot repository +storage costs. [discrete] [[using-searchable-snapshots]] @@ -40,9 +40,9 @@ To mount an index from a snapshot that contains multiple indices, we recommend creating a <> of the snapshot that contains only the index you want to search, and mounting the clone. You should not delete a snapshot if it has any mounted indices, so creating a clone enables you to -manage the lifecycle of the backup snapshot independently of any -{search-snaps}. If you use {ilm-init} to manage your {search-snaps} then it -will automatically look after cloning the snapshot as needed. +manage the lifecycle of the backup snapshot independently of any {search-snaps}. +If you use {ilm-init} to manage your {search-snaps} then it will automatically +look after cloning the snapshot as needed. You can control the allocation of the shards of {search-snap} indices using the same mechanisms as for regular indices. For example, you could use @@ -84,9 +84,9 @@ Use any of the following repository types with searchable snapshots: * <> You can also use alternative implementations of these repository types, for -instance <>, -as long as they are fully compatible. Use the <> API -to analyze your repository's suitability for use with searchable snapshots. +instance <>, as long as they are fully compatible. +Use the <> API to analyze your repository's suitability for +use with searchable snapshots. // end::searchable-snapshot-repo-types[] [discrete] @@ -122,40 +122,41 @@ performance characteristics and local storage footprints: [[fully-mounted]] Fully mounted index:: -Loads a full copy of the snapshotted index's shards onto node-local storage -within the cluster. {ilm-init} uses this option in the `hot` and `cold` phases. +Fully caches the snapshotted index's shards in the {es} cluster. {ilm-init} uses +this option in the `hot` and `cold` phases. + -Search performance for a fully mounted index is normally -comparable to a regular index, since there is minimal need to access the -snapshot repository. While recovery is ongoing, search performance may be -slower than with a regular index because a search may need some data that has -not yet been retrieved into the local copy. If that happens, {es} will eagerly -retrieve the data needed to complete the search in parallel with the ongoing -recovery. On-disk data is preserved across restarts, such that the node does -not need to re-download data that is already stored on the node after a restart. +Search performance for a fully mounted index is normally comparable to a regular +index, since there is minimal need to access the snapshot repository. While +recovery is ongoing, search performance may be slower than with a regular index +because a search may need some data that has not yet been retrieved into the +local cache. If that happens, {es} will eagerly retrieve the data needed to +complete the search in parallel with the ongoing recovery. On-disk data is +preserved across restarts, such that the node does not need to re-download data +that is already stored on the node after a restart. 
+ Indices managed by {ilm-init} are prefixed with `restored-` when fully mounted. [[partially-mounted]] Partially mounted index:: Uses a local cache containing only recently searched parts of the snapshotted -index's data. This cache has a fixed size and is shared across shards of partially -mounted indices allocated on the same data node. {ilm-init} uses this option in the -`frozen` phase. +index's data. This cache has a fixed size and is shared across shards of +partially mounted indices allocated on the same data node. {ilm-init} uses this +option in the `frozen` phase. + If a search requires data that is not in the cache, {es} fetches the missing data from the snapshot repository. Searches that require these fetches are -slower, but the fetched data is stored in the cache so that similar searches -can be served more quickly in future. {es} will evict infrequently used data -from the cache to free up space. The cache is cleared when a node is restarted. +slower, but the fetched data is stored in the cache so that similar searches can +be served more quickly in future. {es} will evict infrequently used data from +the cache to free up space. The cache is cleared when a node is restarted. + -Although slower than a fully mounted index or a regular index, a -partially mounted index still returns search results quickly, even for -large data sets, because the layout of data in the repository is heavily -optimized for search. Many searches will need to retrieve only a small subset of -the total shard data before returning results. +Although slower than a fully mounted index or a regular index, a partially +mounted index still returns search results quickly, even for large data sets, +because the layout of data in the repository is heavily optimized for search. +Many searches will need to retrieve only a small subset of the total shard data +before returning results. + -Indices managed by {ilm-init} are prefixed with `partial-` when partially mounted. +Indices managed by {ilm-init} are prefixed with `partial-` when partially +mounted. To partially mount an index, you must have one or more nodes with a shared cache available. By default, dedicated frozen data tier nodes (nodes with the @@ -166,16 +167,16 @@ headroom of 100GB. Using a dedicated frozen tier is highly recommended for production use. If you do not have a dedicated frozen tier, you must configure the `xpack.searchable.snapshot.shared_cache.size` setting to reserve space for the -cache on one or more nodes. Partially mounted indices -are only allocated to nodes that have a shared cache. +cache on one or more nodes. Partially mounted indices are only allocated to +nodes that have a shared cache. [[searchable-snapshots-shared-cache]] `xpack.searchable.snapshot.shared_cache.size`:: (<>) -Disk space reserved for the shared cache of partially mounted indices. -Accepts a percentage of total disk space or an absolute <>. Defaults to `90%` of total disk space for dedicated frozen data tier -nodes. Otherwise defaults to `0b`. +Disk space reserved for the shared cache of partially mounted indices. Accepts a +percentage of total disk space or an absolute <>. +Defaults to `90%` of total disk space for dedicated frozen data tier nodes. +Otherwise defaults to `0b`. 
`xpack.searchable.snapshot.shared_cache.size.max_headroom`:: (<>, <>) @@ -189,8 +190,9 @@ To illustrate how these settings work in concert let us look at two examples when using the default values of the settings on a dedicated frozen node: * A 4000 GB disk will result in a shared cache sized at 3900 GB. 90% of 4000 GB -is 3600 GB, leaving 400 GB headroom. The default `max_headroom` of 100 GB -takes effect, and the result is therefore 3900 GB. +is 3600 GB, leaving 400 GB headroom. The default `max_headroom` of 100 GB takes +effect, and the result is therefore 3900 GB. + * A 400 GB disk will result in a shared cache sized at 360 GB. You can configure the settings in `elasticsearch.yml`: @@ -201,20 +203,20 @@ xpack.searchable.snapshot.shared_cache.size: 4TB ---- IMPORTANT: You can only configure these settings on nodes with the -<> role. Additionally, nodes with a shared -cache can only have a single <>. +<> role. Additionally, nodes with a shared cache +can only have a single <>. -{es} also uses a dedicated system index named `.snapshot-blob-cache` to speed -up the recoveries of {search-snap} shards. This index is used as an additional +{es} also uses a dedicated system index named `.snapshot-blob-cache` to speed up +the recoveries of {search-snap} shards. This index is used as an additional caching layer on top of the partially or fully mounted data and contains the minimal required data to start the {search-snap} shards. {es} automatically -deletes the documents that are no longer used in this index. This periodic -clean up can be tuned using the following settings: +deletes the documents that are no longer used in this index. This periodic clean +up can be tuned using the following settings: `searchable_snapshots.blob_cache.periodic_cleanup.interval`:: (<>) -The interval at which the periodic cleanup of the `.snapshot-blob-cache` -index is scheduled. Defaults to every hour (`1h`). +The interval at which the periodic cleanup of the `.snapshot-blob-cache` index +is scheduled. Defaults to every hour (`1h`). `searchable_snapshots.blob_cache.periodic_cleanup.retention_period`:: (<>) @@ -237,10 +239,10 @@ index. Defaults to `10m`. === Reduce costs with {search-snaps} In most cases, {search-snaps} reduce the costs of running a cluster by removing -the need for replica shards and for shard data to be copied between -nodes. However, if it's particularly expensive to retrieve data from a snapshot -repository in your environment, {search-snaps} may be more costly than -regular indices. Ensure that the cost structure of your operating environment is +the need for replica shards and for shard data to be copied between nodes. +However, if it's particularly expensive to retrieve data from a snapshot +repository in your environment, {search-snaps} may be more costly than regular +indices. Ensure that the cost structure of your operating environment is compatible with {search-snaps} before using them. [discrete] @@ -250,7 +252,7 @@ compatible with {search-snaps} before using them. For resiliency, a regular index requires multiple redundant copies of each shard across multiple nodes. If a node fails, {es} uses the redundancy to rebuild any lost shard copies. A {search-snap} index doesn't require replicas. If a node -containing a {search-snap} index fails, {es} can rebuild the lost shard copy +containing a {search-snap} index fails, {es} can rebuild the lost shard cache from the snapshot repository. 
Without replicas, rarely-accessed {search-snap} indices require far fewer @@ -264,11 +266,11 @@ only partially-mounted {search-snap} indices, requires even fewer resources. ==== Data transfer costs When a shard of a regular index is moved between nodes, its contents are copied -from another node in your cluster. In many environments, the costs of moving data -between nodes are significant, especially if running in a Cloud environment with -nodes in different zones. In contrast, when mounting a {search-snap} index or -moving one of its shards, the data is always copied from the snapshot repository. -This is typically much cheaper. +from another node in your cluster. In many environments, the costs of moving +data between nodes are significant, especially if running in a Cloud environment +with nodes in different zones. In contrast, when mounting a {search-snap} index +or moving one of its shards, the data is always copied from the snapshot +repository. This is typically much cheaper. WARNING: Most cloud providers charge significant fees for data transferred between regions and for data transferred out of their platforms. You should only @@ -281,37 +283,49 @@ multiple clusters and use <> or [[back-up-restore-searchable-snapshots]] === Back up and restore {search-snaps} -You can use <> to back up a -cluster containing {search-snap} indices. When you restore a snapshot -containing {search-snap} indices, these indices are restored as {search-snap} -indices again. +You can use <> to back up a cluster +containing {search-snap} indices. When you restore a snapshot containing +{search-snap} indices, these indices are restored as {search-snap} indices +again. Before you restore a snapshot containing a {search-snap} index, you must first <> containing the original index snapshot. When restored, the {search-snap} index mounts the -original index snapshot from its original repository. If wanted, you -can use separate repositories for regular snapshots and {search-snaps}. +original index snapshot from its original repository. If wanted, you can use +separate repositories for regular snapshots and {search-snaps}. A snapshot of a {search-snap} index contains only a small amount of metadata which identifies its original index snapshot. It does not contain any data from the original index. The restore of a backup will fail to restore any {search-snap} indices whose original index snapshot is unavailable. -Because {search-snap} indices are not regular indices, it is not possible to -use a <> to take -snapshots of {search-snap} indices. +Because {search-snap} indices are not regular indices, it is not possible to use +a <> to take snapshots +of {search-snap} indices. [discrete] [[searchable-snapshots-reliability]] === Reliability of {search-snaps} The sole copy of the data in a {search-snap} index is the underlying snapshot, -stored in the repository. If the repository fails or corrupts the contents of -the snapshot then the data is lost. Although {es} may have made copies of the -data onto local storage, these copies may be incomplete and cannot be used to -recover any data after a repository failure. You must make sure that your -repository is reliable and protects against corruption of your data while it is -at rest in the repository. +stored in the repository. For example: + +* You cannot unregister a repository while any of the searchable snapshots it +contains are mounted in {es}. You also cannot delete a snapshot if any of its +indices are mounted as a searchable snapshot in the same cluster. 
+ +* If you mount indices from snapshots held in a repository to which a different +cluster has write access then you must make sure that the other cluster does not +delete these snapshots. + +* If you delete a snapshot while it is mounted as a searchable snapshot then the +data is lost. Similarly, if the repository fails or corrupts the contents of the +snapshot then the data is lost. + +* Although {es} may have cached the data onto local storage, these caches may be +incomplete and cannot be used to recover any data after a repository failure. +You must make sure that your repository is reliable and protects against +corruption of your data while it is at rest in the repository. The blob storage offered by all major public cloud providers typically offers very good protection against data loss or corruption. If you manage your own diff --git a/docs/reference/settings/common-defs.asciidoc b/docs/reference/settings/common-defs.asciidoc index 1fc9a8bfecf2..33e736c70046 100644 --- a/docs/reference/settings/common-defs.asciidoc +++ b/docs/reference/settings/common-defs.asciidoc @@ -149,20 +149,28 @@ Otherwise, it defaults to `jks`. end::ssl-truststore-type[] tag::ssl-verification-mode-values[] -Controls the verification of certificates. + -Valid values are: - - * `full`, which verifies that the provided certificate is signed by a trusted -authority (CA) and also verifies that the server's hostname (or IP address) -matches the names identified within the certificate. - * `certificate`, which verifies that the provided certificate is signed by a -trusted authority (CA), but does not perform any hostname verification. - * `none`, which performs _no verification_ of the server's certificate. This -mode disables many of the security benefits of SSL/TLS and should only be used -after very careful consideration. It is primarily intended as a temporary -diagnostic mechanism when attempting to resolve TLS errors; its use on -production clusters is strongly discouraged. +.Valid values +[%collapsible%open] +===== +`full`:: +Validates that the provided certificate: has an issue date that's +within the `not_before` and `not_after` dates; chains to a trusted Certificate +Authority (CA); has a `hostname` or IP address that matches the names within +the certificate. + +`certificate`:: +Validates the provided certificate and verifies that it's signed by a +trusted authority (CA), but doesn't check the certificate `hostname`. + +`none`:: +Performs no certificate validation. + -The default value is `full`. -end::ssl-verification-mode-values[] +IMPORTANT: Setting certificate validation to `none` disables many security +benefits of SSL/TLS, which is very dangerous. Only set this value if instructed +by Elastic Support as a temporary diagnostic mechanism when attempting to +resolve TLS errors. +===== ++ +Defaults to `full`. +end::ssl-verification-mode-values[] \ No newline at end of file diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 21d53aeebf89..a7c5bbcb1c40 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -45,6 +45,18 @@ starting {es} for the first time, which means that you must <>. -- +`xpack.security.enrollment.enabled`:: +(<>) +Defaults to `false`. Controls enrollment (of nodes and {kib}) to a local node +that's been <>. +When set to `true`, the local node can generate new enrollment tokens. Existing +tokens can be used for enrollment if they are still valid. 
++ +-- +The security autoconfiguration process will set this to `true` unless +an administrator sets it to `false` before starting {es}. +-- + `xpack.security.hide_settings`:: (<>) A comma-separated list of settings that are omitted from the results of the @@ -1501,6 +1513,7 @@ This setting cannot be used with `ssl.truststore.password`. // tag::saml-ssl-verification-mode-tag[] `ssl.verification_mode` {ess-icon}:: (<>) +Controls the verification of certificates. include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-verification-mode-values] // end::saml-ssl-verification-mode-tag[] @@ -2010,13 +2023,24 @@ In addition to the <> can specify the following settings. // end::jwt-description-tag[] +// tag::jwt-token-type-tag[] +`token_type` {ess-icon}:: +(<>) +The token type, `id_token` or `access_token`, that the JWT realm uses to verify +incoming JWTs. Defaults to `id_token`. +// end::jwt-token-type-tag[] + // tag::jwt-allowed-audiences-tag[] `allowed_audiences` {ess-icon}:: (<>) A list of allowed JWT audiences that {es} should verify. {es} will only consume JWTs that were intended for any of these audiences, as denoted by the `aud` -claim in the JWT). Examples of `aud` claim are `https://example.com/client1` -and `other_service,elasticsearch`. +claim in the JWT). +The audiences are compared with exact string matches and do not support wildcards or regex. +Examples of `aud` claim are `https://example.com/client1` +and `other_service,elasticsearch`. When `token_type` is `access_token`, the audiences can +be optionally denoted by a different claim in the JWT if `aud` does not exist. +See also <>. // end::jwt-allowed-audiences-tag[] // tag::jwt-allowed-clock-skew-tag[] @@ -2034,9 +2058,61 @@ Identifier is usually a case sensitive URL using the https scheme that contains scheme, host, and optionally, port number and path components and no query or fragment components. However, it can be any string. The value for this setting should be provided by your JWT Issuer. +The issuer is compared with exact string matches and do not support wildcards or regex. Examples of `iss` claim are `https://example.com:8443/jwt` and `issuer123`. // end::jwt-allowed-issuer-tag[] +// tag::jwt-allowed-subjects-tag[] +`allowed_subjects` {ess-icon}:: +(<>) +A list of allowed JWT subjects that {es} should verify. {es} will only consume +JWTs that were issued for any of these subjects, as denoted by the `sub` +claim in the JWT. +The subjects are compared with exact string matches and do not support wildcards or regex. +Examples of `sub` claim are `https://example.com/user1` +and `user_1,user2`. +When `token_type` is `access_token`, this setting is mandatory and the subject can be +optionally denoted by a different claim in the JWT if `sub` does not exist. +See also <>. +// end::jwt-allowed-subjects-tag[] + +// tag::jwt-fallback-claims-sub-tag[] +[[security-settings-jwt-fallback-claims-sub]] +`fallback_claims.sub` {ess-icon}:: +(<>) +The alternative claim to look for the subject information if the `sub` claim +does not exist. It is configurable only when the `token_type` is `access_token`. +The fallback is applied everywhere the `sub` claim is used. +// end::jwt-fallback-claims-sub-tag[] + +// tag::jwt-fallback-claims-aud-tag[] +[[security-settings-jwt-fallback-claims-aud]] +`fallback_claims.aud` {ess-icon}:: +(<>) +The alternative claim to look for the audiences information if the `aud` claim +does not exist. It is configurable only when the `token_type` is `access_token`. 
+The fallback is applied everywhere the `aud` claim is used. +// end::jwt-fallback-claims-aud-tag[] + +// tag::jwt-required-claims-tag[] +`required_claims` {ess-icon}:: +(<>) +Additional claims and associated values that {es} should verify. +This is a group setting that takes key/value pairs, where the key is a string +and the value must be either a string or an array of strings. +The values are compared with exact string matches and do not support wildcards or regex. + +For example: + +[source, yaml] +------------------------------------------------------------ +xpack.security.authc.realms.jwt.jwt1: + required_claims: + token_use: "id" + versions: ["1.0", "2.0"] +------------------------------------------------------------ +// end::jwt-required-claims-tag[] + // tag::jwt-allowed-signature-algorithms-tag[] `allowed_signature_algorithms` {ess-icon}:: (<>) diff --git a/docs/reference/settings/ssl-settings.asciidoc b/docs/reference/settings/ssl-settings.asciidoc index 04aff7e7737b..8e139d836880 100644 --- a/docs/reference/settings/ssl-settings.asciidoc +++ b/docs/reference/settings/ssl-settings.asciidoc @@ -26,12 +26,16 @@ Defaults to +{client-auth-default}+. endif::client-auth-default[] endif::server[] -ifdef::verifies[] +{ssl-prefix}.ssl.verification_mode+:: (<>) -Controls the verification of certificates. -include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-verification-mode-values] +ifndef::verifies[] +The SSL settings in `pass:a[{ssl-prefix}.ssl]` control a _server context_ for TLS, which +defines the settings for the TLS connection. The use of `verification_mode` in +a TLS _server_ is discouraged. endif::verifies[] +Defines how to verify the certificates presented by another party in the TLS +connection: +include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-verification-mode-values] +{ssl-prefix}.ssl.cipher_suites+:: (<>) diff --git a/docs/reference/setup/advanced-configuration.asciidoc b/docs/reference/setup/advanced-configuration.asciidoc index 0fc562db2f2f..2a7ccc56742d 100644 --- a/docs/reference/setup/advanced-configuration.asciidoc +++ b/docs/reference/setup/advanced-configuration.asciidoc @@ -158,9 +158,7 @@ using the service manager. See <>. [[readiness-tcp-port]] ===== Enable the Elasticsearch TCP readiness port -preview::["This functionality is in technical preview and may be changed or removed in a future release. -It is intended for internal, experimental use. Features in technical preview are not subject to the support -SLA of official GA features."] +preview::[] If configured, a node can open a TCP port when the node is in a ready state. A node is deemed ready when it has successfully joined a cluster. 
In a single node configuration, the node is diff --git a/docs/reference/setup/important-settings/gc-logging.asciidoc b/docs/reference/setup/important-settings/gc-logging.asciidoc index 273ac3ca5bac..3534e1335c9f 100644 --- a/docs/reference/setup/important-settings/gc-logging.asciidoc +++ b/docs/reference/setup/important-settings/gc-logging.asciidoc @@ -37,7 +37,7 @@ Change the default GC log output location to `/opt/my-app/gc.log` by -Xlog:all=warning:stderr:utctime,level,tags # Enable GC logging to a custom location with a variety of options --Xlog:gc*,gc+age=trace,safepoint:file=/opt/my-app/gc.log:utctime,pid,tags:filecount=32,filesize=64m +-Xlog:gc*,gc+age=trace,safepoint:file=/opt/my-app/gc.log:utctime,level,pid,tags:filecount=32,filesize=64m ---- Configure an {es} <> to send GC debug logs to diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 404e1f718c3b..52a62bb3aa89 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -96,7 +96,7 @@ endif::[] ifeval::["{release-state}"!="unreleased"] [source,sh,subs="attributes"] ---- -docker run --name es01 --net elastic -p 9200:9200 -p 9300:9300 -it {docker-image} +docker run --name es01 --net elastic -p 9200:9200 -it {docker-image} ---- endif::[] @@ -403,14 +403,52 @@ sudo sysctl -w vm.max_map_count=262144 ====== Windows with https://docs.docker.com/docker-for-windows/wsl[Docker Desktop WSL 2 backend] -The `vm.max_map_count` setting must be set in the docker-desktop container: +The `vm.max_map_count` setting must be set in the "docker-desktop" WSL instance before the +ElasticSearch container will properly start. There are several ways to do this, depending +on your version of Windows and your version of WSL. + +If you are on Windows 10 before version 22H2, or if you are on Windows 10 version 22H2 using the +built-in version of WSL, you must either manually set it every time you restart Docker before starting +your ElasticSearch container, or (if you do not wish to do so on every restart) you must globally set +every WSL2 instance to have the `vm.max_map_count` changed. This is because these versions of WSL +do not properly process the /etc/sysctl.conf file. + +To manually set it every time you reboot, you must run the following commands in a command prompt +or PowerShell window every time you restart Docker: [source,sh] -------------------------------------------- -wsl -d docker-desktop +wsl -d docker-desktop -u root sysctl -w vm.max_map_count=262144 -------------------------------------------- +If you are on these versions of WSL and you do not want to have to run those commands every +time you restart Docker, you can globally change every WSL distribution with this setting +by modifying your %USERPROFILE%\.wslconfig as follows: + +[source,text] +-------------------------------------------- +[wsl2] +kernelCommandLine = "sysctl.vm.max_map_count=262144" +-------------------------------------------- + +This will cause all WSL2 VMs to have that setting assigned when they start. 
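+
+In either case, it may be worth confirming the value that is actually in effect inside the "docker-desktop" distribution before starting the container. A quick way to read it back, reusing the same `wsl` invocation shown above, is:
+
+[source,sh]
+--------------------------------------------
+# Prints the current value, e.g. "vm.max_map_count = 262144"
+wsl -d docker-desktop -u root sysctl vm.max_map_count
+--------------------------------------------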
+ +If you are on Windows 11, or Windows 10 version 22H2 and have installed the Microsoft Store +version of WSL, you can modify the /etc/sysctl.conf within the "docker-desktop" WSL +distribution, perhaps with commands like this: + +[source,sh] +-------------------------------------------- +wsl -d docker-desktop -u root +vi /etc/sysctl.conf +-------------------------------------------- + +and appending a line which reads: +[source,text] +-------------------------------------------- +vm.max_map_count = 262144 +-------------------------------------------- ===== Configuration files must be readable by the `elasticsearch` user diff --git a/docs/reference/setup/install/docker/docker-compose.yml b/docs/reference/setup/install/docker/docker-compose.yml index 4c6ba48035b4..4b4ecf401b7d 100644 --- a/docs/reference/setup/install/docker/docker-compose.yml +++ b/docs/reference/setup/install/docker/docker-compose.yml @@ -84,7 +84,6 @@ services: - xpack.security.http.ssl.key=certs/es01/es01.key - xpack.security.http.ssl.certificate=certs/es01/es01.crt - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt - - xpack.security.http.ssl.verification_mode=certificate - xpack.security.transport.ssl.enabled=true - xpack.security.transport.ssl.key=certs/es01/es01.key - xpack.security.transport.ssl.certificate=certs/es01/es01.crt @@ -124,7 +123,6 @@ services: - xpack.security.http.ssl.key=certs/es02/es02.key - xpack.security.http.ssl.certificate=certs/es02/es02.crt - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt - - xpack.security.http.ssl.verification_mode=certificate - xpack.security.transport.ssl.enabled=true - xpack.security.transport.ssl.key=certs/es02/es02.key - xpack.security.transport.ssl.certificate=certs/es02/es02.crt @@ -164,7 +162,6 @@ services: - xpack.security.http.ssl.key=certs/es03/es03.key - xpack.security.http.ssl.certificate=certs/es03/es03.crt - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt - - xpack.security.http.ssl.verification_mode=certificate - xpack.security.transport.ssl.enabled=true - xpack.security.transport.ssl.key=certs/es03/es03.key - xpack.security.transport.ssl.certificate=certs/es03/es03.crt diff --git a/docs/reference/setup/sysconfig/executable-jna-tmpdir.asciidoc b/docs/reference/setup/sysconfig/executable-jna-tmpdir.asciidoc index 8aa41f2a9a94..0305a917da08 100644 --- a/docs/reference/setup/sysconfig/executable-jna-tmpdir.asciidoc +++ b/docs/reference/setup/sysconfig/executable-jna-tmpdir.asciidoc @@ -27,11 +27,21 @@ filesystem, or configure {es} to use a different location for its temporary directory by setting the <> environment variable. 
For instance: +* If you are running {es} directly from a shell, set `$ES_TMPDIR` as follows: ++ ["source","sh",subs="attributes"] -------------------------------------------- export ES_TMPDIR=/usr/share/elasticsearch/tmp -------------------------------------------- +* If you are using `systemd` to run {es} as a service, add the following +line to the `[Service]` section in a <>: ++ +[source,text] +-------------------------------------------- +Environment=ES_TMPDIR=/usr/share/elasticsearch/tmp +-------------------------------------------- + If you need finer control over the location of these temporary files, you can also configure the path that JNA uses with the <> `-Djna.tmpdir=` and you can configure the path that `libffi` uses for its diff --git a/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc index 3242fd2ee80f..649a904786f1 100644 --- a/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc @@ -97,7 +97,7 @@ Don't expand wildcard patterns. `ignore_unavailable`:: (Optional, Boolean) If `false`, the snapshot fails if any data stream or index in `indices` is -missing or closed. If `true`, the snapshot ignores missing or closed data +missing. If `true`, the snapshot ignores missing data streams and indices. Defaults to `false`. `include_global_state`:: diff --git a/docs/reference/snapshot-restore/index.asciidoc b/docs/reference/snapshot-restore/index.asciidoc index aae149234de0..390f6664391b 100644 --- a/docs/reference/snapshot-restore/index.asciidoc +++ b/docs/reference/snapshot-restore/index.asciidoc @@ -199,8 +199,8 @@ contents of the repository then future snapshot or restore operations may fail, reporting corruption or other data inconsistencies, or may appear to succeed having silently lost some of your data. -You may however safely <> as long as +You may however safely <> as long as . The repository is not registered with {es} while you are restoring its contents. @@ -208,6 +208,9 @@ contents. . When you have finished restoring the repository its contents are exactly as they were when you took the backup. +If you no longer need any of the snapshots in a repository, unregister it from +{es} before deleting its contents from the underlying storage. + Additionally, snapshots may contain security-sensitive information, which you may wish to <>. diff --git a/docs/reference/snapshot-restore/repository-s3.asciidoc b/docs/reference/snapshot-restore/repository-s3.asciidoc index 097f60d36a5a..de72511010b5 100644 --- a/docs/reference/snapshot-restore/repository-s3.asciidoc +++ b/docs/reference/snapshot-restore/repository-s3.asciidoc @@ -143,9 +143,9 @@ settings belong in the `elasticsearch.yml` file. `read_timeout`:: - The socket timeout for connecting to S3. The value should specify the unit. - For example, a value of `5s` specifies a 5 second timeout. The default value - is 50 seconds. + (<>) The maximum time {es} will wait to receive the next byte + of data over an established, open connection to the repository before it closes the + connection. The default value is 50 seconds. `max_retries`:: @@ -283,7 +283,7 @@ multiple deployments may share the same bucket. `chunk_size`:: - Big files can be broken down into chunks during snapshotting if needed. + (<>) Big files can be broken down into chunks during snapshotting if needed. Specify the chunk size as a value and unit, for example: `1TB`, `1GB`, `10MB`. 
Defaults to the maximum size of a blob in the S3 which is `5TB`. @@ -302,7 +302,8 @@ include::repository-shared-settings.asciidoc[] `buffer_size`:: - Minimum threshold below which the chunk is uploaded using a single request. + (<>) Minimum threshold below which the chunk is + uploaded using a single request. Beyond this threshold, the S3 repository will use the https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html[AWS Multipart Upload API] to split the chunk into several parts, each of @@ -327,13 +328,14 @@ include::repository-shared-settings.asciidoc[] Sets the S3 storage class for objects stored in the snapshot repository. Values may be `standard`, `reduced_redundancy`, `standard_ia`, `onezone_ia` - and `intelligent_tiering`. Defaults to `standard`. - Changing this setting on an existing repository only affects the - storage class for newly created objects, resulting in a mixed usage of - storage classes. Additionally, S3 Lifecycle Policies can be used to manage - the storage class of existing objects. Due to the extra complexity with the - Glacier class lifecycle, it is not currently supported by this - repository type. For more information about the different classes, see + and `intelligent_tiering`. Defaults to `standard`. Changing this setting on + an existing repository only affects the storage class for newly created + objects, resulting in a mixed usage of storage classes. You may use an S3 + Lifecycle Policy to adjust the storage class of existing objects in your + repository, but you must not transition objects to Glacier classes and you + must not expire objects. If you use Glacier storage classes or object + expiry then you may permanently lose access to your repository contents. + For more information about S3 storage classes, see https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html[AWS Storage Classes Guide] diff --git a/docs/reference/snapshot-restore/restore-snapshot.asciidoc b/docs/reference/snapshot-restore/restore-snapshot.asciidoc index 3aeac1c8556c..4fce5e99f449 100644 --- a/docs/reference/snapshot-restore/restore-snapshot.asciidoc +++ b/docs/reference/snapshot-restore/restore-snapshot.asciidoc @@ -289,14 +289,15 @@ the cluster. . 
Temporarily stop indexing and turn off the following features: + -- -* GeoIP database downloader +* GeoIP database downloader and ILM history store + [source,console] ---- PUT _cluster/settings { "persistent": { - "ingest.geoip.downloader.enabled": false + "ingest.geoip.downloader.enabled": false, + "indices.lifecycle.history_index_enabled": false } } ---- @@ -429,14 +430,15 @@ POST _snapshot/my_repository/my_snapshot_2099.05.06/_restore features you stopped: + -- -* GeoIP database downloader +* GeoIP database downloader and ILM history store + [source,console] ---- PUT _cluster/settings { "persistent": { - "ingest.geoip.downloader.enabled": true + "ingest.geoip.downloader.enabled": true, + "indices.lifecycle.history_index_enabled": true } } ---- diff --git a/docs/reference/sql/endpoints/odbc/installation.asciidoc b/docs/reference/sql/endpoints/odbc/installation.asciidoc index ba5d99ecef19..3630e09ea160 100644 --- a/docs/reference/sql/endpoints/odbc/installation.asciidoc +++ b/docs/reference/sql/endpoints/odbc/installation.asciidoc @@ -11,8 +11,8 @@ The recommended installation platform is Windows 10 64 bit or Windows Server 201 Before you install the {odbc} you need to meet the following prerequisites; -* .NET Framework 4.0 full - https://www.microsoft.com/en-au/download/details.aspx?id=17718 -* Microsoft Visual C++ Redistributable for Visual Studio 2017 or later - https://support.microsoft.com/en-au/help/2977003/the-latest-supported-visual-c-downloads +* .NET Framework 4.x full, latest - https://dotnet.microsoft.com/download/dotnet-framework +* Microsoft Visual C++ Redistributable for Visual Studio 2017 or later - https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist - The 64 bit driver requires the x64 redistributable - The 32 bit driver requires the x86 or the x64 redistributable (the latter also installs the components needed for the 32 bit driver) * Elevated privileges (administrator) for the User performing the installation. diff --git a/docs/reference/sql/security.asciidoc b/docs/reference/sql/security.asciidoc index 7e75bfe08338..784778702591 100644 --- a/docs/reference/sql/security.asciidoc +++ b/docs/reference/sql/security.asciidoc @@ -24,13 +24,34 @@ PKI/X.509:: Use X.509 certificates to authenticate {es-sql} to {es}. For this, o [discrete] [[sql-security-permissions]] ==== Permissions (server-side) -Lastly, one the server one need to add a few permissions to -users so they can run SQL. To run SQL a user needs `read` and +On the server, one needs to add a few permissions to +users so they can run SQL. To run SQL, a user needs `read` and `indices:admin/get` permissions at minimum while some parts of -the API require `cluster:monitor/main`. +the API require `cluster:monitor/main`. -The following example configures a role that can run SQL in JDBC querying the `test` and `bort` -indices: +You can add permissions by <>, and assigning +that role to the user. Roles can be created using {kib}, an +<> or the <>. Using {kib} or the role management APIs is the preferred +method for defining roles. File-based role management is useful if you want to +define a role that doesn't need to change. You cannot use the role management +APIs to view or edit a role defined in `roles.yml`. 
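+
+Whichever method you choose, the role only takes effect once it is assigned to a user.
+As a rough sketch using the create or update users API (the user name, password, and
+role name below are only placeholders), the assignment can look like this:
+
+[source,console]
+--------------------------------------------------
+POST /_security/user/jdbc_user
+{
+  "password" : "l0ng-r4nd0m-p@ssw0rd",
+  "roles" : [ "cli_or_drivers_minimal" ]
+}
+--------------------------------------------------
+// TEST[skip:illustration purposes only]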
+ +[discrete] +[[sql-role-api-example]] +===== Add permissions with the role management APIs + +This example configures a role that can run SQL in JDBC querying the `test` +index: + +include::{xes-repo-dir}/rest-api/security/create-roles.asciidoc[tag=sql-queries-permission] + +[discrete] +[[sql-role-file-example]] +===== Add permissions to `roles.yml` + +This example configures a role that can run SQL in JDBC querying the `test` and `bort` +indices. Add the following to `roles.yml`: [source, yaml] -------------------------------------------------- diff --git a/docs/reference/tab-widgets/troubleshooting/data/restore-from-snapshot.asciidoc b/docs/reference/tab-widgets/troubleshooting/data/restore-from-snapshot.asciidoc index 0a199bd6e48d..589965d8ab07 100644 --- a/docs/reference/tab-widgets/troubleshooting/data/restore-from-snapshot.asciidoc +++ b/docs/reference/tab-widgets/troubleshooting/data/restore-from-snapshot.asciidoc @@ -210,6 +210,21 @@ POST _snapshot/my_repository/snapshot-20200617/_restore <1> The indices to restore. + <2> We also want to restore the aliases. ++ +NOTE: If any <> need to be restored we'll need to specify them using the +`feature_states` field and the indices that belong to the feature states we restore must not be specified under `indices`. +The <> returns both the `indices` and `feature_states` that need to be restored for the restore from snapshot diagnosis. e.g.: ++ +[source,console] +---- +POST _snapshot/my_repository/snapshot-20200617/_restore +{ + "feature_states": [ "geoip" ], + "indices": "kibana_sample_data_flights,.ds-my-data-stream-2022.06.17-000001", + "include_aliases": true +} +---- +// TEST[skip:illustration purposes only] . Finally we can verify that the indices health is now `green` via the <>. + @@ -430,6 +445,21 @@ POST _snapshot/my_repository/snapshot-20200617/_restore <1> The indices to restore. + <2> We also want to restore the aliases. ++ +NOTE: If any <> need to be restored we'll need to specify them using the +`feature_states` field and the indices that belong to the feature states we restore must not be specified under `indices`. +The <> returns both the `indices` and `feature_states` that need to be restored for the restore from snapshot diagnosis. e.g.: ++ +[source,console] +---- +POST _snapshot/my_repository/snapshot-20200617/_restore +{ + "feature_states": [ "geoip" ], + "indices": "kibana_sample_data_flights,.ds-my-data-stream-2022.06.17-000001", + "include_aliases": true +} +---- +// TEST[skip:illustration purposes only] . Finally we can verify that the indices health is now `green` via the <>. + diff --git a/docs/reference/transform/api-quickref.asciidoc b/docs/reference/transform/api-quickref.asciidoc index 2f46b11abcf2..e6a71f7e2b48 100644 --- a/docs/reference/transform/api-quickref.asciidoc +++ b/docs/reference/transform/api-quickref.asciidoc @@ -18,6 +18,7 @@ _transform/ * <> * <> * <> +* <> * <> For the full list, see <>. diff --git a/docs/reference/transform/apis/get-transform-stats.asciidoc b/docs/reference/transform/apis/get-transform-stats.asciidoc index 5ae4cf4bd497..2a7ed1913546 100644 --- a/docs/reference/transform/apis/get-transform-stats.asciidoc +++ b/docs/reference/transform/apis/get-transform-stats.asciidoc @@ -152,18 +152,18 @@ that the {transform} is failing to keep up. `status`:: (string) Health status of this transform. Statuses are: - `green`::: - The transform is healthy. + * `green`: + The transform is healthy. - `unknown`::: - The health of the transform could not be determined. 
+ * `unknown`: + The health of the transform could not be determined. - `yellow`::: - The functionality of the transform is in a degraded state and may need remediation - to avoid the health becoming `red`. + * `yellow`: + The functionality of the transform is in a degraded state and may need + remediation to avoid the health becoming `red`. - `red`::: - The transform is experiencing an outage or is unavailable for use. + * `red`: + The transform is experiencing an outage or is unavailable for use. `issues`:: (Optional, array) If a non-healthy status is returned, contains a list of issues diff --git a/docs/reference/transform/apis/index.asciidoc b/docs/reference/transform/apis/index.asciidoc index e52daf0ad599..bd3a7588db44 100644 --- a/docs/reference/transform/apis/index.asciidoc +++ b/docs/reference/transform/apis/index.asciidoc @@ -15,6 +15,8 @@ include::reset-transform.asciidoc[leveloffset=+2] include::start-transform.asciidoc[leveloffset=+2] //STOP include::stop-transform.asciidoc[leveloffset=+2] +//SCHEDULE_NOW +include::schedule-now-transform.asciidoc[leveloffset=+2] //UPDATE-UPGRADE include::update-transform.asciidoc[leveloffset=+2] include::upgrade-transforms.asciidoc[leveloffset=+2] diff --git a/docs/reference/transform/apis/schedule-now-transform.asciidoc b/docs/reference/transform/apis/schedule-now-transform.asciidoc new file mode 100644 index 000000000000..9bc42b8b7d30 --- /dev/null +++ b/docs/reference/transform/apis/schedule-now-transform.asciidoc @@ -0,0 +1,63 @@ +[role="xpack"] +[testenv="basic"] +[[schedule-now-transform]] += Schedule Now {transform} API + +[subs="attributes"] +++++ +Schedule Now {transform} +++++ + +Schedules a {transform} to run now. + +[[schedule-now-transform-request]] +== {api-request-title} + +`POST _transform//_schedule_now` + +[[schedule-now-transform-prereqs]] +== {api-prereq-title} + +* Requires the `manage_transform` cluster privilege. This privilege is included +in the `transform_admin` built-in role. + +[[schedule-now-transform-desc]] +== {api-description-title} + +If you call the _schedule_now API, the {transform} processes the new data instantly, +without waiting for the configured `frequency` interval. +After the _schedule_now API is called, the {transform} will be processed again at +`now + frequency` unless the API is called again in the meantime. + +[[schedule-now-transform-path-parms]] +== {api-path-parms-title} + +``:: +(Required, string) +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-id] + +[[schedule-now-transform-query-parms]] +== {api-query-parms-title} + +`timeout`:: +(Optional, time) +Period to wait for a response. If no response is received before the timeout +expires, the request fails and returns an error. Defaults to `30s`. + +[[schedule-now-transform-examples]] +== {api-examples-title} + +[source,console] +-------------------------------------------------- +POST _transform/ecommerce_transform/_schedule_now +-------------------------------------------------- +// TEST[skip:setup kibana sample data] + +When the {transform} is scheduled now, you receive the following results: + +[source,console-result] +---- +{ + "acknowledged" : true +} +---- diff --git a/docs/reference/transform/apis/start-transform.asciidoc b/docs/reference/transform/apis/start-transform.asciidoc index 03f5bbe4d2f8..f4f99f0f3457 100644 --- a/docs/reference/transform/apis/start-transform.asciidoc +++ b/docs/reference/transform/apis/start-transform.asciidoc @@ -61,6 +61,11 @@ Identifier for the {transform}.
[[start-transform-query-parms]] == {api-query-parms-title} +`from`:: +(Optional, string) Restricts the set of transformed entities to those changed + after this time. Relative times like now-30d are supported. +Only applicable for continuous transforms. + `timeout`:: (Optional, time) Period to wait for a response. If no response is received before the timeout diff --git a/docs/reference/transform/apis/transform-apis.asciidoc b/docs/reference/transform/apis/transform-apis.asciidoc index f98e192e495d..2912ffb1b17d 100644 --- a/docs/reference/transform/apis/transform-apis.asciidoc +++ b/docs/reference/transform/apis/transform-apis.asciidoc @@ -10,5 +10,6 @@ * <> * <> * <> +* <> * <> * <> diff --git a/docs/reference/transform/transforms-at-scale.asciidoc b/docs/reference/transform/transforms-at-scale.asciidoc index f4ad4fb112dc..f1d47c994324 100644 --- a/docs/reference/transform/transforms-at-scale.asciidoc +++ b/docs/reference/transform/transforms-at-scale.asciidoc @@ -116,6 +116,23 @@ example, greater than `2020-01-01T00:00:00`) to limit which historical indices are accessed. If you use a relative time value (for example, `now-30d`) then this date range is re-evaluated at the point of each checkpoint execution. +Consider using <> in your index names to +reduce the number of indices to resolve in your queries. Add a date pattern +- for example, `yyyy-MM-dd` - to your index names and use it to limit your query +to a specific date. The example below queries indices only from yesterday and +today: + +[source,js] +---------------------------------- + "source": { + "index": [ + "", + "" + ] + }, +---------------------------------- +// NOTCONSOLE + [discrete] [[optimize-shading-strategy]] diff --git a/docs/reference/troubleshooting/common-issues/circuit-breaker-errors.asciidoc b/docs/reference/troubleshooting/common-issues/circuit-breaker-errors.asciidoc index ca815dd3c04d..fe79ef57b6ea 100644 --- a/docs/reference/troubleshooting/common-issues/circuit-breaker-errors.asciidoc +++ b/docs/reference/troubleshooting/common-issues/circuit-breaker-errors.asciidoc @@ -82,7 +82,7 @@ you've enabled fielddata and triggered the <>, consider disabling it and using a `keyword` field instead. See <>. -**Clear the fieldata cache** +**Clear the fielddata cache** If you've triggered the fielddata circuit breaker and can't disable fielddata, use the <> to clear the fielddata cache. @@ -92,4 +92,4 @@ This may disrupt any in-flight searches that use fielddata. ---- POST _cache/clear?fielddata=true ---- -// TEST[s/^/PUT my-index\n/] \ No newline at end of file +// TEST[s/^/PUT my-index\n/] diff --git a/docs/reference/troubleshooting/snapshot/add-repository.asciidoc b/docs/reference/troubleshooting/snapshot/add-repository.asciidoc index 63b84b5e91cb..dc2ce5a4bc25 100644 --- a/docs/reference/troubleshooting/snapshot/add-repository.asciidoc +++ b/docs/reference/troubleshooting/snapshot/add-repository.asciidoc @@ -3,7 +3,9 @@ Multiple {es} deployments are writing to the same snapshot repository. {es} doesn't support this configuration and only one cluster is allowed to write to the same -repository. +repository. See <> for potential side-effects of +corruption of the repository contents, which may not be resolved by the following +guide. 
To remedy the situation mark the repository as read-only or remove it from all the other deployments, and re-add (recreate) the repository in the current deployment: diff --git a/docs/reference/troubleshooting/troubleshooting-searches.asciidoc b/docs/reference/troubleshooting/troubleshooting-searches.asciidoc index 3aca26671716..90a28058b6d6 100644 --- a/docs/reference/troubleshooting/troubleshooting-searches.asciidoc +++ b/docs/reference/troubleshooting/troubleshooting-searches.asciidoc @@ -297,4 +297,25 @@ used by the data stream. For static settings, you need to create a new index with the correct settings. Next, you can reindex the data into that index. For data streams, refer to <>. \ No newline at end of file +for a data stream>>. + +[discrete] +[[troubleshooting-slow-searches]] +=== Find slow queries + +<> can help pinpoint slow performing search +requests. Enabling <> on top can help determine +query source. Add the following settings to the `elasticsearch.yml` configuration file +to trace queries. The resulting logging is verbose, so disable these settings when not +troubleshooting. + +[source,yaml] +---- +xpack.security.audit.enabled: true +xpack.security.audit.logfile.events.include: _all +xpack.security.audit.logfile.events.emit_request_body: true +---- + +Refer to +https://www.elastic.co/blog/advanced-tuning-finding-and-fixing-slow-elasticsearch-queries[Advanced +tuning: finding and fixing slow Elasticsearch queries] for more information. diff --git a/docs/reference/vectors/vector-functions.asciidoc b/docs/reference/vectors/vector-functions.asciidoc index a492d8ad6ff2..e0ed85189c97 100644 --- a/docs/reference/vectors/vector-functions.asciidoc +++ b/docs/reference/vectors/vector-functions.asciidoc @@ -9,13 +9,20 @@ to limit the number of matched documents with a `query` parameter. This is the list of available vector functions and vector access methods: -1. `cosineSimilarity` – calculates cosine similarity -2. `dotProduct` – calculates dot product -3. `l1norm` – calculates L^1^ distance -4. `l2norm` - calculates L^2^ distance -5. `doc[].vectorValue` – returns a vector's value as an array of floats -6. `doc[].magnitude` – returns a vector's magnitude +1. <> – calculates cosine similarity +2. <> – calculates dot product +3. <> – calculates L^1^ distance +4. <> - calculates L^2^ distance +5. <].vectorValue`>> – returns a vector's value as an array of floats +6. <].magnitude`>> – returns a vector's magnitude +NOTE: The recommended way to access dense vectors is through the +`cosineSimilarity`, `dotProduct`, `l1norm` or `l2norm` functions. Please note +however, that you should call these functions only once per script. For example, +don’t use these functions in a loop to calculate the similarity between a +document vector and multiple other vectors. If you need that functionality, +reimplement these functions yourself by +<>. Let's create an index with a `dense_vector` mapping and index a couple of documents into it. @@ -54,6 +61,9 @@ POST my-index-000001/_refresh -------------------------------------------------- // TESTSETUP +[[vector-functions-cosine]] +====== Cosine similarity + The `cosineSimilarity` function calculates the measure of cosine similarity between a given query vector and document vectors. @@ -90,6 +100,9 @@ GET my-index-000001/_search NOTE: If a document's dense vector field has a number of dimensions different from the query's vector, an error will be thrown. 
+[[vector-functions-dot-product]] +====== Dot product + The `dotProduct` function calculates the measure of dot product between a given query vector and document vectors. @@ -124,6 +137,9 @@ GET my-index-000001/_search <1> Using the standard sigmoid function prevents scores from being negative. +[[vector-functions-l1]] +====== L^1^ distance (Manhattan distance) + The `l1norm` function calculates L^1^ distance (Manhattan distance) between a given query vector and document vectors. @@ -163,6 +179,9 @@ we reversed the output from `l1norm` and `l2norm`. Also, to avoid division by 0 when a document vector matches the query exactly, we added `1` in the denominator. +[[vector-functions-l2]] +====== L^2^ distance (Euclidean distance) + The `l2norm` function calculates L^2^ distance (Euclidean distance) between a given query vector and document vectors. @@ -193,10 +212,13 @@ GET my-index-000001/_search } -------------------------------------------------- -NOTE: If a document doesn't have a value for a vector field on which -a vector function is executed, an error will be thrown. +[[vector-functions-missing-values]] +====== Checking for missing values + +If a document doesn't have a value for a vector field on which a vector function +is executed, an error will be thrown. -You can check if a document has a value for the field `my_vector` by +You can check if a document has a value for the field `my_vector` with `doc['my_vector'].size() == 0`. Your overall script can look like this: [source,js] @@ -205,9 +227,10 @@ You can check if a document has a value for the field `my_vector` by -------------------------------------------------- // NOTCONSOLE -The recommended way to access dense vectors is through `cosineSimilarity`, -`dotProduct`, `l1norm` or `l2norm` functions. 
But for custom use cases, -you can access dense vectors's values directly through the following functions: +[[vector-functions-accessing-vectors]] +====== Accessing vectors directly + +You can access vector values directly through the following functions: - `doc[].vectorValue` – returns a vector's value as an array of floats diff --git a/docs/src/test/cluster/config/userdict_ko.txt b/docs/src/test/cluster/config/userdict_ko.txt index 63c1c3a1e224..b70fb43a0092 100644 --- a/docs/src/test/cluster/config/userdict_ko.txt +++ b/docs/src/test/cluster/config/userdict_ko.txt @@ -1,5 +1,5 @@ # Additional nouns c++ -C샤프 +C쁠쁠 세종 -세종시 세종 시 \ No newline at end of file +세종시 세종 시 diff --git a/docs/src/yamlRestTest/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java b/docs/src/yamlRestTest/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java index db2834f4abed..3754ca033998 100644 --- a/docs/src/yamlRestTest/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java +++ b/docs/src/yamlRestTest/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java @@ -21,6 +21,7 @@ import org.elasticsearch.client.RestClient; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.core.Strings; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.yaml.ClientYamlDocsTestClient; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; @@ -43,7 +44,6 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; -import java.util.Locale; import java.util.Map; import static java.util.Collections.emptyMap; @@ -316,7 +316,7 @@ public void execute(ClientYamlTestExecutionContext executionContext) throws IOEx Object previousSecond = null; while (firstTokens.hasNext()) { if (false == secondTokens.hasNext()) { - fail(String.format(Locale.ROOT, """ + fail(Strings.format(""" %s has fewer tokens than %s. %s has [%s] but %s is out of tokens. \ %s's last token was [%s] and %s's last token was' [%s] """, second, first, first, firstTokens.next(), second, first, previousFirst, second, previousSecond)); @@ -327,7 +327,7 @@ public void execute(ClientYamlTestExecutionContext executionContext) throws IOEx String secondText = (String) secondToken.get("token"); // Check the text and produce an error message with the utf8 sequence if they don't match. if (false == secondText.equals(firstText)) { - fail(String.format(Locale.ROOT, """ + fail(Strings.format(""" text differs: %s was [%s] but %s was [%s]. In utf8 those are %s and %s @@ -339,7 +339,7 @@ public void execute(ClientYamlTestExecutionContext executionContext) throws IOEx previousSecond = secondToken; } if (secondTokens.hasNext()) { - fail(String.format(Locale.ROOT, """ + fail(Strings.format(""" %s has more tokens than %s. %s has [%s] but %s is out of tokens. 
\ %s's last token was [%s] and %s's last token was [%s] """, second, first, second, secondTokens.next(), first, first, previousFirst, second, previousSecond)); diff --git a/gradle/build.versions.toml b/gradle/build.versions.toml index b158b94c8f8a..638c2ac14187 100644 --- a/gradle/build.versions.toml +++ b/gradle/build.versions.toml @@ -1,6 +1,6 @@ [versions] asm = "9.3" -jackson = "2.14.0" +jackson = "2.14.2" junit5 = "5.8.1" spock = "2.1-groovy-3.0" @@ -39,6 +39,6 @@ shadow-plugin = "gradle.plugin.com.github.johnrengelman:shadow:7.1.2" spock-core = { group = "org.spockframework", name="spock-core", version.ref="spock" } spock-junit4 = { group = "org.spockframework", name="spock-junit4", version.ref="spock" } spock-platform = { group = "org.spockframework", name="spock-bom", version.ref="spock" } -spotless-plugin = "com.diffplug.spotless:spotless-plugin-gradle:6.7.2" +spotless-plugin = "com.diffplug.spotless:spotless-plugin-gradle:6.11.0" wiremock = "com.github.tomakehurst:wiremock-jre8-standalone:2.23.2" xmlunit-core = "org.xmlunit:xmlunit-core:2.8.2" diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index f6f16e818873..1e220398dae3 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml
diff --git a/libs/build.gradle b/libs/build.gradle index f2b7e247fabd..85c331149fc5 100644 --- a/libs/build.gradle +++ b/libs/build.gradle @@ -26,6 +26,7 @@ configure(subprojects - project('elasticsearch-log4j')) { && false == isPluginApi(project, depProject) && false == depProject.path.equals(':libs:elasticsearch-x-content') && false == depProject.path.equals(':libs:elasticsearch-core') + && false == depProject.path.equals(':libs:elasticsearch-plugin-api') && depProject.path.startsWith(':libs') && depProject.name.startsWith('elasticsearch-')) { throw new InvalidUserDataException("projects in :libs " diff --git a/libs/core/src/main/java/org/elasticsearch/core/AbstractRefCounted.java b/libs/core/src/main/java/org/elasticsearch/core/AbstractRefCounted.java index 5f5331c55549..42073489faf8 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/AbstractRefCounted.java +++ b/libs/core/src/main/java/org/elasticsearch/core/AbstractRefCounted.java @@ -8,6 +8,7 @@ package org.elasticsearch.core; +import java.util.Objects; import java.util.concurrent.atomic.AtomicInteger; /** @@ -47,7 +48,7 @@ public final boolean tryIncRef() { public final boolean decRef() { touch(); int i = refCount.decrementAndGet(); - assert i >= 0; + assert i >= 0 : "invalid decRef call: already closed"; if (i == 0) { try { closeInternal(); @@ -94,11 +95,18 @@ public final int refCount() { * Construct an {@link AbstractRefCounted} which runs the given {@link Runnable} when all references are released. */ public static AbstractRefCounted of(Runnable onClose) { + Objects.requireNonNull(onClose); return new AbstractRefCounted() { @Override protected void closeInternal() { onClose.run(); } + + @Override + public String toString() { + return "refCounted[" + onClose + "]"; + } }; } + } diff --git a/libs/core/src/main/java/org/elasticsearch/core/Releasables.java b/libs/core/src/main/java/org/elasticsearch/core/Releasables.java index 05e3d154929a..2d6781326a17 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/Releasables.java +++ b/libs/core/src/main/java/org/elasticsearch/core/Releasables.java @@ -10,7 +10,7 @@ import java.io.IOException; import java.io.UncheckedIOException; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; /** Utility methods to work with {@link Releasable}s. */ public enum Releasables { @@ -98,13 +98,22 @@ public static Releasable wrap(final Releasable... releasables) { } /** - * Wraps a {@link Releasable} such that its {@link Releasable#close()} method can be called multiple times without double releasing. + * Wraps a {@link Releasable} such that its {@link Releasable#close()} method can be called multiple times without double-releasing.
*/ public static Releasable releaseOnce(final Releasable releasable) { - final AtomicBoolean released = new AtomicBoolean(false); - return () -> { - if (released.compareAndSet(false, true)) { - releasable.close(); + final var ref = new AtomicReference<>(releasable); + return new Releasable() { + @Override + public void close() { + final var acquired = ref.getAndSet(null); + if (acquired != null) { + acquired.close(); + } + } + + @Override + public String toString() { + return "releaseOnce[" + ref.get() + "]"; } }; } diff --git a/libs/core/src/main/java/org/elasticsearch/core/Streams.java b/libs/core/src/main/java/org/elasticsearch/core/Streams.java index c32c7ddb8b1f..f28a8615c82f 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/Streams.java +++ b/libs/core/src/main/java/org/elasticsearch/core/Streams.java @@ -12,6 +12,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.nio.ByteBuffer; /** * Simple utility methods for file and stream copying. All copy methods close all affected streams when done. @@ -78,6 +79,62 @@ public static long copy(final InputStream in, final OutputStream out) throws IOE return copy(in, out, LOCAL_BUFFER.get(), true); } + /** + * Read up to {code count} bytes from {@code input} and store them into {@code buffer}. + * The buffers position will be incremented by the number of bytes read from the stream. + * @param input stream to read from + * @param buffer buffer to read into + * @param count maximum number of bytes to read + * @return number of bytes read from the stream + * @throws IOException in case of I/O errors + */ + public static int read(InputStream input, ByteBuffer buffer, int count) throws IOException { + if (buffer.hasArray()) { + return readToHeapBuffer(input, buffer, count); + } + return readToDirectBuffer(input, buffer, count); + } + + private static int readToHeapBuffer(InputStream input, ByteBuffer buffer, int count) throws IOException { + final int pos = buffer.position(); + int read = readFully(input, buffer.array(), buffer.arrayOffset() + pos, count); + if (read > 0) { + buffer.position(pos + read); + } + return read; + } + + private static int readToDirectBuffer(InputStream input, ByteBuffer b, int count) throws IOException { + int totalRead = 0; + final byte[] buffer = LOCAL_BUFFER.get(); + while (totalRead < count) { + final int len = Math.min(count - totalRead, buffer.length); + final int read = input.read(buffer, 0, len); + if (read == -1) { + break; + } + b.put(buffer, 0, read); + totalRead += read; + } + return totalRead; + } + + public static int readFully(InputStream reader, byte[] dest) throws IOException { + return readFully(reader, dest, 0, dest.length); + } + + public static int readFully(InputStream reader, byte[] dest, int offset, int len) throws IOException { + int read = 0; + while (read < len) { + final int r = reader.read(dest, offset + read, len - read); + if (r == -1) { + break; + } + read += r; + } + return read; + } + /** * Wraps an {@link OutputStream} such that it's {@code close} method becomes a noop * diff --git a/libs/core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTests.java b/libs/core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTests.java deleted file mode 100644 index a373161c71d5..000000000000 --- a/libs/core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTests.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.common.util.concurrent; - -import org.elasticsearch.core.AbstractRefCounted; -import org.elasticsearch.test.ESTestCase; -import org.hamcrest.Matchers; - -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; - -public class RefCountedTests extends ESTestCase { - - public void testRefCount() { - MyRefCounted counted = new MyRefCounted(); - - int incs = randomIntBetween(1, 100); - for (int i = 0; i < incs; i++) { - if (randomBoolean()) { - counted.incRef(); - } else { - assertTrue(counted.tryIncRef()); - } - counted.ensureOpen(); - } - - for (int i = 0; i < incs; i++) { - counted.decRef(); - counted.ensureOpen(); - } - - counted.incRef(); - counted.decRef(); - for (int i = 0; i < incs; i++) { - if (randomBoolean()) { - counted.incRef(); - } else { - assertTrue(counted.tryIncRef()); - } - counted.ensureOpen(); - } - - for (int i = 0; i < incs; i++) { - counted.decRef(); - counted.ensureOpen(); - } - - counted.decRef(); - assertFalse(counted.tryIncRef()); - assertThat( - expectThrows(IllegalStateException.class, counted::incRef).getMessage(), - equalTo(AbstractRefCounted.ALREADY_CLOSED_MESSAGE) - ); - assertThat(expectThrows(IllegalStateException.class, counted::ensureOpen).getMessage(), equalTo("closed")); - } - - public void testMultiThreaded() throws InterruptedException { - final MyRefCounted counted = new MyRefCounted(); - Thread[] threads = new Thread[randomIntBetween(2, 5)]; - final CountDownLatch latch = new CountDownLatch(1); - final CopyOnWriteArrayList exceptions = new CopyOnWriteArrayList<>(); - for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread(() -> { - try { - latch.await(); - for (int j = 0; j < 10000; j++) { - counted.incRef(); - assertTrue(counted.hasReferences()); - try { - counted.ensureOpen(); - } finally { - counted.decRef(); - } - } - } catch (Exception e) { - exceptions.add(e); - } - }); - threads[i].start(); - } - latch.countDown(); - for (Thread thread : threads) { - thread.join(); - } - counted.decRef(); - assertThat(expectThrows(IllegalStateException.class, counted::ensureOpen).getMessage(), equalTo("closed")); - assertThat( - expectThrows(IllegalStateException.class, counted::incRef).getMessage(), - equalTo(AbstractRefCounted.ALREADY_CLOSED_MESSAGE) - ); - assertThat(counted.refCount(), is(0)); - assertFalse(counted.hasReferences()); - assertThat(exceptions, Matchers.emptyIterable()); - } - - private static final class MyRefCounted extends AbstractRefCounted { - - private final AtomicBoolean closed = new AtomicBoolean(false); - - @Override - protected void closeInternal() { - this.closed.set(true); - } - - public void ensureOpen() { - if (closed.get()) { - assertEquals(0, this.refCount()); - assertFalse(hasReferences()); - throw new IllegalStateException("closed"); - } - } - } -} diff --git a/libs/core/src/test/java/org/elasticsearch/core/AbstractRefCountedTests.java b/libs/core/src/test/java/org/elasticsearch/core/AbstractRefCountedTests.java new file mode 100644 index 000000000000..9610bae32a77 --- /dev/null +++ 
b/libs/core/src/test/java/org/elasticsearch/core/AbstractRefCountedTests.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.core; + +import org.elasticsearch.test.ESTestCase; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class AbstractRefCountedTests extends ESTestCase { + + public void testRefCount() { + final RefCounted counted = createRefCounted(); + + int incs = randomIntBetween(1, 100); + for (int i = 0; i < incs; i++) { + if (randomBoolean()) { + counted.incRef(); + } else { + assertTrue(counted.tryIncRef()); + } + assertTrue(counted.hasReferences()); + } + + for (int i = 0; i < incs; i++) { + counted.decRef(); + assertTrue(counted.hasReferences()); + } + + counted.incRef(); + counted.decRef(); + for (int i = 0; i < incs; i++) { + if (randomBoolean()) { + counted.incRef(); + } else { + assertTrue(counted.tryIncRef()); + } + assertTrue(counted.hasReferences()); + } + + for (int i = 0; i < incs; i++) { + counted.decRef(); + assertTrue(counted.hasReferences()); + } + + counted.decRef(); + assertFalse(counted.tryIncRef()); + assertThat( + expectThrows(IllegalStateException.class, counted::incRef).getMessage(), + equalTo(AbstractRefCounted.ALREADY_CLOSED_MESSAGE) + ); + assertFalse(counted.hasReferences()); + } + + public void testMultiThreaded() throws InterruptedException { + final AbstractRefCounted counted = createRefCounted(); + final Thread[] threads = new Thread[randomIntBetween(2, 5)]; + final CountDownLatch latch = new CountDownLatch(1); + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(() -> { + try { + latch.await(); + for (int j = 0; j < 10000; j++) { + assertTrue(counted.hasReferences()); + if (randomBoolean()) { + counted.incRef(); + } else { + assertTrue(counted.tryIncRef()); + } + assertTrue(counted.hasReferences()); + counted.decRef(); + } + } catch (Exception e) { + throw new AssertionError(e); + } + }); + threads[i].start(); + } + latch.countDown(); + for (Thread thread : threads) { + thread.join(); + } + counted.decRef(); + assertFalse(counted.hasReferences()); + assertThat( + expectThrows(IllegalStateException.class, counted::incRef).getMessage(), + equalTo(AbstractRefCounted.ALREADY_CLOSED_MESSAGE) + ); + assertThat(counted.refCount(), is(0)); + assertFalse(counted.hasReferences()); + } + + public void testToString() { + assertEquals("refCounted[runnable description]", createRefCounted().toString()); + } + + public void testNullCheck() { + expectThrows(NullPointerException.class, () -> AbstractRefCounted.of(null)); + } + + private static AbstractRefCounted createRefCounted() { + final var closed = new AtomicBoolean(); + return AbstractRefCounted.of(new Runnable() { + @Override + public void run() { + assertTrue(closed.compareAndSet(false, true)); + } + + @Override + public String toString() { + return "runnable description"; + } + }); + } +} diff --git a/libs/core/src/test/java/org/elasticsearch/core/internal/provider/EmbeddedImplClassLoaderTests.java 
b/libs/core/src/test/java/org/elasticsearch/core/internal/provider/EmbeddedImplClassLoaderTests.java index 9cabf7a1ab97..db517b9cfabd 100644 --- a/libs/core/src/test/java/org/elasticsearch/core/internal/provider/EmbeddedImplClassLoaderTests.java +++ b/libs/core/src/test/java/org/elasticsearch/core/internal/provider/EmbeddedImplClassLoaderTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.core.internal.provider; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.internal.provider.EmbeddedImplClassLoader.CompoundEnumeration; import org.elasticsearch.test.ESTestCase; @@ -452,7 +453,7 @@ private void testResourcesVersioned(String resourcePath, boolean enableMulti, in // getResources var urls1 = Collections.list(urlcLoader.getResources(resourcePath)).stream().map(URL::toString).toList(); var urls2 = Collections.list(embedLoader.getResources(resourcePath)).stream().map(URL::toString).toList(); - assertThat(String.format(Locale.ROOT, "urls1=%s, urls2=%s", urls1, urls2), urls2, hasSize(1)); + assertThat(Strings.format("urls1=%s, urls2=%s", urls1, urls2), urls2, hasSize(1)); assertThat(urls1.get(0), endsWith("!/" + expectedURLSuffix)); assertThat(urls2.get(0), endsWith("impl.jar!/IMPL-JARS/res/res-impl.jar/" + expectedURLSuffix)); diff --git a/libs/geo/src/test/java/org/elasticsearch/geometry/CircleTests.java b/libs/geo/src/test/java/org/elasticsearch/geometry/CircleTests.java index bae91638bd80..321dd5ccd4e8 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geometry/CircleTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geometry/CircleTests.java @@ -59,4 +59,9 @@ public void testInitValidation() { StandardValidator.instance(true).validate(new Circle(200, 10, 1, 20)); } + + @Override + protected Circle mutateInstance(Circle instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } } diff --git a/libs/geo/src/test/java/org/elasticsearch/geometry/GeometryCollectionTests.java b/libs/geo/src/test/java/org/elasticsearch/geometry/GeometryCollectionTests.java index 45842505ce5f..6a7bda7f9e0b 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geometry/GeometryCollectionTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geometry/GeometryCollectionTests.java @@ -64,4 +64,9 @@ public void testInitValidation() { StandardValidator.instance(true).validate(new GeometryCollection(Collections.singletonList(new Point(20, 10, 30)))); } + + @Override + protected GeometryCollection mutateInstance(GeometryCollection instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } } diff --git a/libs/geo/src/test/java/org/elasticsearch/geometry/LineTests.java b/libs/geo/src/test/java/org/elasticsearch/geometry/LineTests.java index 3003a243c96a..d3c386883fe0 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geometry/LineTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geometry/LineTests.java @@ -80,4 +80,9 @@ public void testWKTValidation() { ); assertEquals("found Z value [6.0] but [ignore_z_value] parameter is [false]", ex.getMessage()); } + + @Override + protected Line mutateInstance(Line instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } } diff --git a/libs/geo/src/test/java/org/elasticsearch/geometry/MultiLineTests.java b/libs/geo/src/test/java/org/elasticsearch/geometry/MultiLineTests.java index b7d47784026d..68acba559626 100644 --- 
a/libs/geo/src/test/java/org/elasticsearch/geometry/MultiLineTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geometry/MultiLineTests.java @@ -62,4 +62,9 @@ public void testValidation() { new MultiLine(Collections.singletonList(new Line(new double[] { 3, 4 }, new double[] { 1, 2 }, new double[] { 6, 5 }))) ); } + + @Override + protected MultiLine mutateInstance(MultiLine instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } } diff --git a/libs/geo/src/test/java/org/elasticsearch/geometry/MultiPointTests.java b/libs/geo/src/test/java/org/elasticsearch/geometry/MultiPointTests.java index 011d29322c9d..b25057182e95 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geometry/MultiPointTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geometry/MultiPointTests.java @@ -69,4 +69,9 @@ public void testValidation() { StandardValidator.instance(true).validate(new MultiPoint(Collections.singletonList(new Point(2, 1, 3)))); } + + @Override + protected MultiPoint mutateInstance(MultiPoint instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } } diff --git a/libs/geo/src/test/java/org/elasticsearch/geometry/MultiPolygonTests.java b/libs/geo/src/test/java/org/elasticsearch/geometry/MultiPolygonTests.java index bb6046d60960..ff4a6047f263 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geometry/MultiPolygonTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geometry/MultiPolygonTests.java @@ -78,4 +78,9 @@ public void testValidation() { ) ); } + + @Override + protected MultiPolygon mutateInstance(MultiPolygon instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } } diff --git a/libs/geo/src/test/java/org/elasticsearch/geometry/PointTests.java b/libs/geo/src/test/java/org/elasticsearch/geometry/PointTests.java index 40ae05802ae8..136c6b8154b1 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geometry/PointTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geometry/PointTests.java @@ -56,4 +56,9 @@ public void testWKTValidation() { ); assertEquals("found Z value [100.0] but [ignore_z_value] parameter is [false]", ex.getMessage()); } + + @Override + protected Point mutateInstance(Point instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } } diff --git a/libs/geo/src/test/java/org/elasticsearch/geometry/PolygonTests.java b/libs/geo/src/test/java/org/elasticsearch/geometry/PolygonTests.java index 5445de367c21..7c9fdcf7bf16 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geometry/PolygonTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geometry/PolygonTests.java @@ -132,4 +132,9 @@ public void testWKTValidation() { ex.getMessage() ); } + + @Override + protected Polygon mutateInstance(Polygon instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } } diff --git a/libs/geo/src/test/java/org/elasticsearch/geometry/RectangleTests.java b/libs/geo/src/test/java/org/elasticsearch/geometry/RectangleTests.java index b89ea7fe279c..88315412524b 100644 --- a/libs/geo/src/test/java/org/elasticsearch/geometry/RectangleTests.java +++ b/libs/geo/src/test/java/org/elasticsearch/geometry/RectangleTests.java @@ -55,4 +55,9 @@ public void testInitValidation() { StandardValidator.instance(true).validate(new Rectangle(50, 10, 40, 30, 20, 60)); } + + @Override + protected Rectangle mutateInstance(Rectangle instance) { + return null;// TODO implement 
https://github.com/elastic/elasticsearch/issues/25929 + } } diff --git a/libs/grok/src/main/java/org/elasticsearch/grok/GrokCaptureExtracter.java b/libs/grok/src/main/java/org/elasticsearch/grok/GrokCaptureExtracter.java index a4acf0921d36..8a75d125646f 100644 --- a/libs/grok/src/main/java/org/elasticsearch/grok/GrokCaptureExtracter.java +++ b/libs/grok/src/main/java/org/elasticsearch/grok/GrokCaptureExtracter.java @@ -20,28 +20,49 @@ /** * How to extract matches. */ -public abstract class GrokCaptureExtracter { +public interface GrokCaptureExtracter { + /** * Extract {@link Map} results. This implementation of {@link GrokCaptureExtracter} * is mutable and should be discarded after collecting a single result. */ - static class MapExtracter extends GrokCaptureExtracter { + class MapExtracter implements GrokCaptureExtracter { private final Map result; private final List fieldExtracters; + @SuppressWarnings("unchecked") MapExtracter(List captureConfig) { result = captureConfig.isEmpty() ? emptyMap() : new HashMap<>(); fieldExtracters = new ArrayList<>(captureConfig.size()); for (GrokCaptureConfig config : captureConfig) { - fieldExtracters.add(config.objectExtracter(v -> result.put(config.name(), v))); + fieldExtracters.add(config.objectExtracter(value -> { + var key = config.name(); + + // Logstash's Grok processor flattens the list of values to a single value in case there's only 1 match, + // so we have to do the same to be compatible. + // e.g.: + // pattern = `%{SINGLEDIGIT:name}(%{SINGLEDIGIT:name})?` + // - GROK(pattern, "1") => { name: 1 } + // - GROK(pattern, "12") => { name: [1, 2] } + if (result.containsKey(key)) { + if (result.get(key)instanceof List values) { + ((ArrayList) values).add(value); + } else { + var values = new ArrayList<>(); + values.add(result.get(key)); + values.add(value); + result.put(key, values); + } + } else { + result.put(key, value); + } + })); } } @Override - void extract(byte[] utf8Bytes, int offset, Region region) { - for (GrokCaptureExtracter extracter : fieldExtracters) { - extracter.extract(utf8Bytes, offset, region); - } + public void extract(byte[] utf8Bytes, int offset, Region region) { + fieldExtracters.forEach(extracter -> extracter.extract(utf8Bytes, offset, region)); } Map result() { @@ -49,5 +70,5 @@ Map result() { } } - abstract void extract(byte[] utf8Bytes, int offset, Region region); + void extract(byte[] utf8Bytes, int offset, Region region); } diff --git a/libs/grok/src/main/java/org/elasticsearch/grok/GrokCaptureType.java b/libs/grok/src/main/java/org/elasticsearch/grok/GrokCaptureType.java index d301febb261e..50ac44c7e13b 100644 --- a/libs/grok/src/main/java/org/elasticsearch/grok/GrokCaptureType.java +++ b/libs/grok/src/main/java/org/elasticsearch/grok/GrokCaptureType.java @@ -9,7 +9,6 @@ package org.elasticsearch.grok; import org.elasticsearch.grok.GrokCaptureConfig.NativeExtracterMap; -import org.joni.Region; import java.nio.charset.StandardCharsets; import java.util.function.Consumer; @@ -70,16 +69,12 @@ static GrokCaptureType fromString(String str) { } protected final GrokCaptureExtracter rawExtracter(int[] backRefs, Consumer emit) { - return new GrokCaptureExtracter() { - @Override - void extract(byte[] utf8Bytes, int offset, Region region) { - for (int number : backRefs) { - if (region.beg[number] >= 0) { - int matchOffset = offset + region.beg[number]; - int matchLength = region.end[number] - region.beg[number]; - emit.accept(new String(utf8Bytes, matchOffset, matchLength, StandardCharsets.UTF_8)); - return; // Capture only the 
first value. - } + return (utf8Bytes, offset, region) -> { + for (int number : backRefs) { + if (region.beg[number] >= 0) { + int matchOffset = offset + region.beg[number]; + int matchLength = region.end[number] - region.beg[number]; + emit.accept(new String(utf8Bytes, matchOffset, matchLength, StandardCharsets.UTF_8)); } } }; diff --git a/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java b/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java index b668b73443e6..0a134f7ce56b 100644 --- a/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java +++ b/libs/grok/src/test/java/org/elasticsearch/grok/GrokTests.java @@ -801,9 +801,15 @@ public void testMultipleNamedCapturesWithSameName() { Grok grok = new Grok(bank, "%{SINGLEDIGIT:num}%{SINGLEDIGIT:num}", logger::warn); assertCaptureConfig(grok, Map.of("num", STRING)); - Map expected = new HashMap<>(); - expected.put("num", "1"); - assertThat(grok.captures("12"), equalTo(expected)); + assertThat(grok.captures("12"), equalTo(Map.of("num", List.of("1", "2")))); + + grok = new Grok(bank, "%{SINGLEDIGIT:num:int}(%{SINGLEDIGIT:num:int})?", logger::warn); + assertCaptureConfig(grok, Map.of("num", INTEGER)); + assertEquals(grok.captures("1"), Map.of("num", 1)); + assertEquals(grok.captures("1a"), Map.of("num", 1)); + assertEquals(grok.captures("a1"), Map.of("num", 1)); + assertEquals(grok.captures("12"), Map.of("num", List.of(1, 2))); + assertEquals(grok.captures("123"), Map.of("num", List.of(1, 2))); } public void testExponentialExpressions() { diff --git a/libs/h3/NOTICE.txt b/libs/h3/NOTICE.txt index a8e5e1c94c87..7074527ca3fb 100644 --- a/libs/h3/NOTICE.txt +++ b/libs/h3/NOTICE.txt @@ -19,3 +19,33 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-- +This project contains code sourced from https://github.com/jeffhain/jafama which is licensed under the Apache 2.0 License. + +Copyright 2012 Jeff Hain + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +============================================================================= +Notice of fdlibm package this program is partially derived from: + +Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. + +Developed at SunSoft, a Sun Microsystems, Inc. business. +Permission to use, copy, modify, and distribute this +software is freely granted, provided that this notice +is preserved. 
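As a usage sketch of the duplicate-capture change to MapExtracter above (my own illustration, not part of the diff; the pattern bank and the logger are assumed to be set up as in GrokTests):

    Map<String, String> bank = Map.of("SINGLEDIGIT", "[0-9]");
    Grok grok = new Grok(bank, "%{SINGLEDIGIT:num}%{SINGLEDIGIT:num}", logger::warn);
    // Previously only the first match was kept: { num: "1" }.
    // With the change above, repeated capture names are collected into a list:
    Map<String, Object> captures = grok.captures("12");   // => { num: ["1", "2"] }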
+============================================================================= + + diff --git a/libs/h3/src/main/java/org/elasticsearch/h3/Constants.java b/libs/h3/src/main/java/org/elasticsearch/h3/Constants.java index e4aa9876ddfa..5192fe836e73 100644 --- a/libs/h3/src/main/java/org/elasticsearch/h3/Constants.java +++ b/libs/h3/src/main/java/org/elasticsearch/h3/Constants.java @@ -30,6 +30,10 @@ final class Constants { * sqrt(3) / 2.0 */ public static double M_SQRT3_2 = 0.8660254037844386467637231707529361834714; + /** + * 2.0 * PI + */ + public static final double M_2PI = 6.28318530717958647692528676655900576839433; /** * max H3 resolution; H3 version 1 has 16 resolutions, numbered 0 through 15 */ diff --git a/libs/h3/src/main/java/org/elasticsearch/h3/CoordIJK.java b/libs/h3/src/main/java/org/elasticsearch/h3/CoordIJK.java index 8ef22379492c..e57f681fc2ea 100644 --- a/libs/h3/src/main/java/org/elasticsearch/h3/CoordIJK.java +++ b/libs/h3/src/main/java/org/elasticsearch/h3/CoordIJK.java @@ -106,11 +106,20 @@ void reset(int i, int j, int k) { * Find the center point in 2D cartesian coordinates of a hex. */ public Vec2d ijkToHex2d() { - int i = this.i - this.k; - int j = this.j - this.k; + final int i = Math.subtractExact(this.i, this.k); + final int j = Math.subtractExact(this.j, this.k); return new Vec2d(i - 0.5 * j, j * Constants.M_SQRT3_2); } + /** + * Find the center point in spherical coordinates of a hex on a particular icosahedral face. + */ + public LatLng ijkToGeo(int face, int res, boolean substrate) { + final int i = Math.subtractExact(this.i, this.k); + final int j = Math.subtractExact(this.j, this.k); + return Vec2d.hex2dToGeo(i - 0.5 * j, j * Constants.M_SQRT3_2, face, res, substrate); + } + /** * Add ijk coordinates. * @@ -120,9 +129,9 @@ public Vec2d ijkToHex2d() { */ public void ijkAdd(int i, int j, int k) { - this.i += i; - this.j += j; - this.k += k; + this.i = Math.addExact(this.i, i); + this.j = Math.addExact(this.j, j); + this.k = Math.addExact(this.k, k); } /** @@ -133,9 +142,9 @@ public void ijkAdd(int i, int j, int k) { * @param k the k coordinate */ public void ijkSub(int i, int j, int k) { - this.i -= i; - this.j -= j; - this.k -= k; + this.i = Math.subtractExact(this.i, i); + this.j = Math.subtractExact(this.j, j); + this.k = Math.subtractExact(this.k, k); } /** @@ -144,9 +153,7 @@ public void ijkSub(int i, int j, int k) { */ public void ijkNormalize() { final int min = Math.min(i, Math.min(j, k)); - i -= min; - j -= min; - k -= min; + ijkSub(min, min, min); } /** @@ -158,9 +165,9 @@ public void downAp7() { // iVec (3, 0, 1) // jVec (1, 3, 0) // kVec (0, 1, 3) - final int i = this.i * 3 + this.j * 1 + this.k * 0; - final int j = this.i * 0 + this.j * 3 + this.k * 1; - final int k = this.i * 1 + this.j * 0 + this.k * 3; + final int i = Math.addExact(Math.multiplyExact(this.i, 3), this.j); + final int j = Math.addExact(Math.multiplyExact(this.j, 3), this.k); + final int k = Math.addExact(Math.multiplyExact(this.k, 3), this.i); this.i = i; this.j = j; this.k = k; @@ -175,9 +182,9 @@ public void downAp7r() { // iVec (3, 1, 0) // jVec (0, 3, 1) // kVec (1, 0, 3) - final int i = this.i * 3 + this.j * 0 + this.k * 1; - final int j = this.i * 1 + this.j * 3 + this.k * 0; - final int k = this.i * 0 + this.j * 1 + this.k * 3; + final int i = Math.addExact(Math.multiplyExact(this.i, 3), this.k); + final int j = Math.addExact(Math.multiplyExact(this.j, 3), this.i); + final int k = Math.addExact(Math.multiplyExact(this.k, 3), this.j); this.i = i; this.j = j; this.k = k; @@ 
-193,9 +200,9 @@ public void downAp3() { // iVec (2, 0, 1) // jVec (1, 2, 0) // kVec (0, 1, 2) - final int i = this.i * 2 + this.j * 1 + this.k * 0; - final int j = this.i * 0 + this.j * 2 + this.k * 1; - final int k = this.i * 1 + this.j * 0 + this.k * 2; + final int i = Math.addExact(Math.multiplyExact(this.i, 2), this.j); + final int j = Math.addExact(Math.multiplyExact(this.j, 2), this.k); + final int k = Math.addExact(Math.multiplyExact(this.k, 2), this.i); this.i = i; this.j = j; this.k = k; @@ -211,9 +218,9 @@ public void downAp3r() { // iVec (2, 1, 0) // jVec (0, 2, 1) // kVec (1, 0, 2) - final int i = this.i * 2 + this.j * 0 + this.k * 1; - final int j = this.i * 1 + this.j * 2 + this.k * 0; - final int k = this.i * 0 + this.j * 1 + this.k * 2; + final int i = Math.addExact(Math.multiplyExact(this.i, 2), this.k); + final int j = Math.addExact(Math.multiplyExact(this.j, 2), this.i); + final int k = Math.addExact(Math.multiplyExact(this.k, 2), this.j); this.i = i; this.j = j; this.k = k; @@ -229,9 +236,9 @@ public void ijkRotate60cw() { // iVec (1, 0, 1) // jVec (1, 1, 0) // kVec (0, 1, 1) - final int i = this.i * 1 + this.j * 1 + this.k * 0; - final int j = this.i * 0 + this.j * 1 + this.k * 1; - final int k = this.i * 1 + this.j * 0 + this.k * 1; + final int i = Math.addExact(this.i, this.j); + final int j = Math.addExact(this.j, this.k); + final int k = Math.addExact(this.i, this.k); this.i = i; this.j = j; this.k = k; @@ -246,9 +253,9 @@ public void ijkRotate60ccw() { // iVec (1, 1, 0) // jVec (0, 1, 1) // kVec (1, 0, 1) - final int i = this.i * 1 + this.j * 0 + this.k * 1; - final int j = this.i * 1 + this.j * 1 + this.k * 0; - final int k = this.i * 0 + this.j * 1 + this.k * 1; + final int i = Math.addExact(this.i, this.k); + final int j = Math.addExact(this.i, this.j); + final int k = Math.addExact(this.j, this.k); this.i = i; this.j = j; this.k = k; @@ -272,12 +279,10 @@ public void neighbor(int digit) { * clockwise aperture 7 grid. */ public void upAp7r() { - i = this.i - this.k; - j = this.j - this.k; - int i = (int) Math.round((2 * this.i + this.j) / 7.0); - int j = (int) Math.round((3 * this.j - this.i) / 7.0); - this.i = i; - this.j = j; + final int i = Math.subtractExact(this.i, this.k); + final int j = Math.subtractExact(this.j, this.k); + this.i = (int) Math.round((Math.addExact(Math.multiplyExact(2, i), j)) / 7.0); + this.j = (int) Math.round((Math.subtractExact(Math.multiplyExact(3, j), i)) / 7.0); this.k = 0; ijkNormalize(); } @@ -288,12 +293,10 @@ public void upAp7r() { * */ public void upAp7() { - i = this.i - this.k; - j = this.j - this.k; - int i = (int) Math.round((3 * this.i - this.j) / 7.0); - int j = (int) Math.round((this.i + 2 * this.j) / 7.0); - this.i = i; - this.j = j; + final int i = Math.subtractExact(this.i, this.k); + final int j = Math.subtractExact(this.j, this.k); + this.i = (int) Math.round((Math.subtractExact(Math.multiplyExact(3, i), j)) / 7.0); + this.j = (int) Math.round((Math.addExact(Math.multiplyExact(2, j), i)) / 7.0); this.k = 0; ijkNormalize(); } diff --git a/libs/h3/src/main/java/org/elasticsearch/h3/FaceIJK.java b/libs/h3/src/main/java/org/elasticsearch/h3/FaceIJK.java index 24eddd6cf365..df2ab26ca068 100644 --- a/libs/h3/src/main/java/org/elasticsearch/h3/FaceIJK.java +++ b/libs/h3/src/main/java/org/elasticsearch/h3/FaceIJK.java @@ -421,8 +421,7 @@ public Overage adjustOverageClassII(int res, boolean pentLeading4, boolean subst * @param res The H3 resolution of the cell. 
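The recurring replacement of raw +, -, and * with Math.addExact / Math.subtractExact / Math.multiplyExact in CoordIJK above turns silent int overflow into a hard failure. A minimal illustration of the difference (mine, not code from the diff):

    int a = Integer.MAX_VALUE;
    int wrapped = a + 1;                 // silently wraps to Integer.MIN_VALUE
    int checked = Math.addExact(a, 1);   // throws ArithmeticException: integer overflow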
*/ public LatLng faceIjkToGeo(int res) { - Vec2d v = coord.ijkToHex2d(); - return v.hex2dToGeo(face, res, false); + return coord.ijkToGeo(face, res, false); } /** @@ -487,7 +486,11 @@ public CellBoundary faceIjkPentToCellBoundary(int res, int start, int length) { } final int unitScale = unitScaleByCIIres[adjRes] * 3; - lastCoord.ijkAdd(fijkOrient.translateI * unitScale, fijkOrient.translateJ * unitScale, fijkOrient.translateK * unitScale); + lastCoord.ijkAdd( + Math.multiplyExact(fijkOrient.translateI, unitScale), + Math.multiplyExact(fijkOrient.translateJ, unitScale), + Math.multiplyExact(fijkOrient.translateK, unitScale) + ); lastCoord.ijkNormalize(); final Vec2d orig2d1 = lastCoord.ijkToHex2d(); @@ -522,8 +525,7 @@ public CellBoundary faceIjkPentToCellBoundary(int res, int start, int length) { // vert == start + NUM_PENT_VERTS is only used to test for possible // intersection on last edge if (vert < start + Constants.NUM_PENT_VERTS) { - final Vec2d vec = fijk.coord.ijkToHex2d(); - final LatLng point = vec.hex2dToGeo(fijk.face, adjRes, true); + final LatLng point = fijk.coord.ijkToGeo(fijk.face, adjRes, true); boundary.add(point); } lastFace = fijk.face; @@ -594,10 +596,18 @@ public CellBoundary faceIjkToCellBoundary(final int res, final int start, final // to each vertex to translate the vertices to that cell. final int[] vertexLast = verts[lastV]; final int[] vertexV = verts[v]; - scratch2.reset(vertexLast[0] + coord.i, vertexLast[1] + coord.j, vertexLast[2] + coord.k); + scratch2.reset( + Math.addExact(vertexLast[0], this.coord.i), + Math.addExact(vertexLast[1], this.coord.j), + Math.addExact(vertexLast[2], this.coord.k) + ); scratch2.ijkNormalize(); final Vec2d orig2d0 = scratch2.ijkToHex2d(); - scratch2.reset(vertexV[0] + coord.i, vertexV[1] + coord.j, vertexV[2] + coord.k); + scratch2.reset( + Math.addExact(vertexV[0], this.coord.i), + Math.addExact(vertexV[1], this.coord.j), + Math.addExact(vertexV[2], this.coord.k) + ); scratch2.ijkNormalize(); final Vec2d orig2d1 = scratch2.ijkToHex2d(); @@ -628,7 +638,7 @@ public CellBoundary faceIjkToCellBoundary(final int res, final int start, final adjacent hexagon edge will lie completely on a single icosahedron face, and no additional vertex is required. 
*/ - final boolean isIntersectionAtVertex = orig2d0.equals(inter) || orig2d1.equals(inter); + final boolean isIntersectionAtVertex = orig2d0.numericallyIdentical(inter) || orig2d1.numericallyIdentical(inter); if (isIntersectionAtVertex == false) { final LatLng point = inter.hex2dToGeo(this.face, adjRes, true); boundary.add(point); @@ -639,8 +649,7 @@ public CellBoundary faceIjkToCellBoundary(final int res, final int start, final // vert == start + NUM_HEX_VERTS is only used to test for possible // intersection on last edge if (vert < start + Constants.NUM_HEX_VERTS) { - final Vec2d vec = fijk.coord.ijkToHex2d(); - final LatLng point = vec.hex2dToGeo(fijk.face, adjRes, true); + final LatLng point = fijk.coord.ijkToGeo(fijk.face, adjRes, true); boundary.add(point); } lastFace = fijk.face; @@ -695,7 +704,7 @@ static long faceIjkToH3(int res, int face, CoordIJK coord) { scratch.reset(coord.i, coord.j, coord.k); scratch.downAp7r(); } - scratch.reset(lastI - scratch.i, lastJ - scratch.j, lastK - scratch.k); + scratch.reset(Math.subtractExact(lastI, scratch.i), Math.subtractExact(lastJ, scratch.j), Math.subtractExact(lastK, scratch.k)); scratch.ijkNormalize(); h = H3Index.H3_set_index_digit(h, r, scratch.unitIjkToDigit()); } diff --git a/libs/h3/src/main/java/org/elasticsearch/h3/FastMath.java b/libs/h3/src/main/java/org/elasticsearch/h3/FastMath.java new file mode 100644 index 000000000000..61d767901ae0 --- /dev/null +++ b/libs/h3/src/main/java/org/elasticsearch/h3/FastMath.java @@ -0,0 +1,1055 @@ +/* + * @notice + * Copyright 2012 Jeff Hain + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * ============================================================================= + * Notice of fdlibm package this program is partially derived from: + * + * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. + * + * Developed at SunSoft, a Sun Microsystems, Inc. business. + * Permission to use, copy, modify, and distribute this + * software is freely granted, provided that this notice + * is preserved. + * ============================================================================= + * + * This code sourced from: + * https://github.com/jeffhain/jafama/blob/d7d2a7659e96e148d827acc24cf385b872cda365/src/main/java/net/jafama/FastMath.java + */ + +package org.elasticsearch.h3; + +/** + * This file is forked from https://github.com/jeffhain/jafama. In particular, it forks the following file: + * https://github.com/jeffhain/jafama/blob/master/src/main/java/net/jafama/FastMath.java + * + * It modifies the original implementation by removing not needed methods leaving the following trigonometric function: + *
+ * <ul>
+ *     <li>{@link #cos(double)}</li>
+ *     <li>{@link #sin(double)}</li>
+ *     <li>{@link #tan(double)}</li>
+ *     <li>{@link #acos(double)}</li>
+ *     <li>{@link #asin(double)}</li>
+ *     <li>{@link #atan(double)}</li>
+ *     <li>{@link #atan2(double, double)}</li>
+ * </ul>
+ */ +final class FastMath { + + /* + * For trigonometric functions, use of look-up tables and Taylor-Lagrange formula + * with 4 derivatives (more take longer to compute and don't add much accuracy, + * less require larger tables (which use more memory, take more time to initialize, + * and are slower to access (at least on the machine they were developed on))). + * + * For angles reduction of cos/sin/tan functions: + * - for small values, instead of reducing angles, and then computing the best index + * for look-up tables, we compute this index right away, and use it for reduction, + * - for large values, treatments derived from fdlibm package are used, as done in + * java.lang.Math. They are faster but still "slow", so if you work with + * large numbers and need speed over accuracy for them, you might want to use + * normalizeXXXFast treatments before your function, or modify cos/sin/tan + * so that they call the fast normalization treatments instead of the accurate ones. + * NB: If an angle is huge (like PI*1e20), in double precision format its last digits + * are zeros, which most likely is not the case for the intended value, and doing + * an accurate reduction on a very inaccurate value is most likely pointless. + * But it gives some sort of coherence that could be needed in some cases. + * + * Multiplication on double appears to be about as fast (or not much slower) than call + * to [], and regrouping some doubles in a private class, to use + * index only once, does not seem to speed things up, so: + * - for uniformly tabulated values, to retrieve the parameter corresponding to + * an index, we recompute it rather than using an array to store it, + * - for cos/sin, we recompute derivatives divided by (multiplied by inverse of) + * factorial each time, rather than storing them in arrays. + * + * Lengths of look-up tables are usually of the form 2^n+1, for their values to be + * of the form ( * k/2^n, k in 0 .. 2^n), so that particular values + * (PI/2, etc.) are "exactly" computed, as well as for other reasons. + * + * Most math treatments I could find on the web, including "fast" ones, + * usually take care of special cases (NaN, etc.) at the beginning, and + * then deal with the general case, which adds a useless overhead for the + * general (and common) case. In this class, special cases are only dealt + * with when needed, and if the general case does not already handle them. + */ + + // -------------------------------------------------------------------------- + // GENERAL CONSTANTS + // -------------------------------------------------------------------------- + + private static final double ONE_DIV_F2 = 1 / 2.0; + private static final double ONE_DIV_F3 = 1 / 6.0; + private static final double ONE_DIV_F4 = 1 / 24.0; + + private static final double TWO_POW_24 = Double.longBitsToDouble(0x4170000000000000L); + private static final double TWO_POW_N24 = Double.longBitsToDouble(0x3E70000000000000L); + + private static final double TWO_POW_66 = Double.longBitsToDouble(0x4410000000000000L); + + private static final int MIN_DOUBLE_EXPONENT = -1074; + private static final int MAX_DOUBLE_EXPONENT = 1023; + + // -------------------------------------------------------------------------- + // CONSTANTS FOR NORMALIZATIONS + // -------------------------------------------------------------------------- + + /* + * Table of constants for 1/(2*PI), 282 Hex digits (enough for normalizing doubles). + * 1/(2*PI) approximation = sum of ONE_OVER_TWOPI_TAB[i]*2^(-24*(i+1)). 
+ */ + private static final double[] ONE_OVER_TWOPI_TAB = { + 0x28BE60, + 0xDB9391, + 0x054A7F, + 0x09D5F4, + 0x7D4D37, + 0x7036D8, + 0xA5664F, + 0x10E410, + 0x7F9458, + 0xEAF7AE, + 0xF1586D, + 0xC91B8E, + 0x909374, + 0xB80192, + 0x4BBA82, + 0x746487, + 0x3F877A, + 0xC72C4A, + 0x69CFBA, + 0x208D7D, + 0x4BAED1, + 0x213A67, + 0x1C09AD, + 0x17DF90, + 0x4E6475, + 0x8E60D4, + 0xCE7D27, + 0x2117E2, + 0xEF7E4A, + 0x0EC7FE, + 0x25FFF7, + 0x816603, + 0xFBCBC4, + 0x62D682, + 0x9B47DB, + 0x4D9FB3, + 0xC9F2C2, + 0x6DD3D1, + 0x8FD9A7, + 0x97FA8B, + 0x5D49EE, + 0xB1FAF9, + 0x7C5ECF, + 0x41CE7D, + 0xE294A4, + 0xBA9AFE, + 0xD7EC47 }; + + /* + * Constants for 2*PI. Only the 23 most significant bits of each mantissa are used. + * 2*PI approximation = sum of TWOPI_TAB. + */ + private static final double TWOPI_TAB0 = Double.longBitsToDouble(0x401921FB40000000L); + private static final double TWOPI_TAB1 = Double.longBitsToDouble(0x3E94442D00000000L); + private static final double TWOPI_TAB2 = Double.longBitsToDouble(0x3D18469880000000L); + private static final double TWOPI_TAB3 = Double.longBitsToDouble(0x3B98CC5160000000L); + private static final double TWOPI_TAB4 = Double.longBitsToDouble(0x3A101B8380000000L); + + private static final double INVPIO2 = Double.longBitsToDouble(0x3FE45F306DC9C883L); // 6.36619772367581382433e-01 53 bits of 2/pi + private static final double PIO2_HI = Double.longBitsToDouble(0x3FF921FB54400000L); // 1.57079632673412561417e+00 first 33 bits of pi/2 + private static final double PIO2_LO = Double.longBitsToDouble(0x3DD0B4611A626331L); // 6.07710050650619224932e-11 pi/2 - PIO2_HI + private static final double INVTWOPI = INVPIO2 / 4; + private static final double TWOPI_HI = 4 * PIO2_HI; + private static final double TWOPI_LO = 4 * PIO2_LO; + + // fdlibm uses 2^19*PI/2 here, but we normalize with % 2*PI instead of % PI/2, + // and we can bear some more error. + private static final double NORMALIZE_ANGLE_MAX_MEDIUM_DOUBLE = StrictMath.pow(2, 20) * (2 * Math.PI); + + // -------------------------------------------------------------------------- + // CONSTANTS AND TABLES FOR COS, SIN + // -------------------------------------------------------------------------- + + private static final int SIN_COS_TABS_SIZE = (1 << getTabSizePower(11)) + 1; + private static final double SIN_COS_DELTA_HI = TWOPI_HI / (SIN_COS_TABS_SIZE - 1); + private static final double SIN_COS_DELTA_LO = TWOPI_LO / (SIN_COS_TABS_SIZE - 1); + private static final double SIN_COS_INDEXER = 1 / (SIN_COS_DELTA_HI + SIN_COS_DELTA_LO); + private static final double[] sinTab = new double[SIN_COS_TABS_SIZE]; + private static final double[] cosTab = new double[SIN_COS_TABS_SIZE]; + + // Max abs value for fast modulo, above which we use regular angle normalization. + // This value must be < (Integer.MAX_VALUE / SIN_COS_INDEXER), to stay in range of int type. + // The higher it is, the higher the error, but also the faster it is for lower values. + // If you set it to ((Integer.MAX_VALUE / SIN_COS_INDEXER) * 0.99), worse accuracy on double range is about 1e-10. 
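As a quick sanity check on the table layout of ONE_OVER_TWOPI_TAB above (an illustration I am adding, not part of the diff): summing just the first two 24-bit chunks already reproduces 1/(2*PI) to roughly ten decimal places.

    // ONE_OVER_TWOPI_TAB[0] * 2^-24 + ONE_OVER_TWOPI_TAB[1] * 2^-48
    double approx = 0x28BE60 * Math.pow(2, -24) + 0xDB9391 * Math.pow(2, -48);
    // approx              ~= 0.1591549430...
    // 1.0 / (2 * Math.PI)  = 0.15915494309189535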
+ private static final double SIN_COS_MAX_VALUE_FOR_INT_MODULO = ((Integer.MAX_VALUE >> 9) / SIN_COS_INDEXER) * 0.99; + + // -------------------------------------------------------------------------- + // CONSTANTS AND TABLES FOR TAN + // -------------------------------------------------------------------------- + + // We use the following formula: + // 1) tan(-x) = -tan(x) + // 2) tan(x) = 1/tan(PI/2-x) + // ---> we only have to compute tan(x) on [0,A] with PI/4<=A= 45deg, and supposed to be >= 51.4deg, as fdlibm code is not + // supposed to work with values inferior to that (51.4deg is about + // (PI/2-Double.longBitsToDouble(0x3FE5942800000000L))). + private static final double TAN_MAX_VALUE_FOR_TABS = Math.toRadians(77.0); + + private static final int TAN_TABS_SIZE = (int) ((TAN_MAX_VALUE_FOR_TABS / (Math.PI / 2)) * (TAN_VIRTUAL_TABS_SIZE - 1)) + 1; + private static final double TAN_DELTA_HI = PIO2_HI / (TAN_VIRTUAL_TABS_SIZE - 1); + private static final double TAN_DELTA_LO = PIO2_LO / (TAN_VIRTUAL_TABS_SIZE - 1); + private static final double TAN_INDEXER = 1 / (TAN_DELTA_HI + TAN_DELTA_LO); + private static final double[] tanTab = new double[TAN_TABS_SIZE]; + private static final double[] tanDer1DivF1Tab = new double[TAN_TABS_SIZE]; + private static final double[] tanDer2DivF2Tab = new double[TAN_TABS_SIZE]; + private static final double[] tanDer3DivF3Tab = new double[TAN_TABS_SIZE]; + private static final double[] tanDer4DivF4Tab = new double[TAN_TABS_SIZE]; + + // Max abs value for fast modulo, above which we use regular angle normalization. + // This value must be < (Integer.MAX_VALUE / TAN_INDEXER), to stay in range of int type. + // The higher it is, the higher the error, but also the faster it is for lower values. + private static final double TAN_MAX_VALUE_FOR_INT_MODULO = (((Integer.MAX_VALUE >> 9) / TAN_INDEXER) * 0.99); + + // -------------------------------------------------------------------------- + // CONSTANTS AND TABLES FOR ACOS, ASIN + // -------------------------------------------------------------------------- + + // We use the following formula: + // 1) acos(x) = PI/2 - asin(x) + // 2) asin(-x) = -asin(x) + // ---> we only have to compute asin(x) on [0,1]. + // For values not close to +-1, we use look-up tables; + // for values near +-1, we use code derived from fdlibm. + + // Supposed to be >= sin(77.2deg), as fdlibm code is supposed to work with values > 0.975, + // but seems to work well enough as long as value >= sin(25deg). 
+ private static final double ASIN_MAX_VALUE_FOR_TABS = StrictMath.sin(Math.toRadians(73.0)); + + private static final int ASIN_TABS_SIZE = (1 << getTabSizePower(13)) + 1; + private static final double ASIN_DELTA = ASIN_MAX_VALUE_FOR_TABS / (ASIN_TABS_SIZE - 1); + private static final double ASIN_INDEXER = 1 / ASIN_DELTA; + private static final double[] asinTab = new double[ASIN_TABS_SIZE]; + private static final double[] asinDer1DivF1Tab = new double[ASIN_TABS_SIZE]; + private static final double[] asinDer2DivF2Tab = new double[ASIN_TABS_SIZE]; + private static final double[] asinDer3DivF3Tab = new double[ASIN_TABS_SIZE]; + private static final double[] asinDer4DivF4Tab = new double[ASIN_TABS_SIZE]; + + private static final double ASIN_MAX_VALUE_FOR_POWTABS = StrictMath.sin(Math.toRadians(88.6)); + private static final int ASIN_POWTABS_POWER = 84; + + private static final double ASIN_POWTABS_ONE_DIV_MAX_VALUE = 1 / ASIN_MAX_VALUE_FOR_POWTABS; + private static final int ASIN_POWTABS_SIZE = (1 << getTabSizePower(12)) + 1; + private static final int ASIN_POWTABS_SIZE_MINUS_ONE = ASIN_POWTABS_SIZE - 1; + private static final double[] asinParamPowTab = new double[ASIN_POWTABS_SIZE]; + private static final double[] asinPowTab = new double[ASIN_POWTABS_SIZE]; + private static final double[] asinDer1DivF1PowTab = new double[ASIN_POWTABS_SIZE]; + private static final double[] asinDer2DivF2PowTab = new double[ASIN_POWTABS_SIZE]; + private static final double[] asinDer3DivF3PowTab = new double[ASIN_POWTABS_SIZE]; + private static final double[] asinDer4DivF4PowTab = new double[ASIN_POWTABS_SIZE]; + + private static final double ASIN_PIO2_HI = Double.longBitsToDouble(0x3FF921FB54442D18L); // 1.57079632679489655800e+00 + private static final double ASIN_PIO2_LO = Double.longBitsToDouble(0x3C91A62633145C07L); // 6.12323399573676603587e-17 + private static final double ASIN_PS0 = Double.longBitsToDouble(0x3fc5555555555555L); // 1.66666666666666657415e-01 + private static final double ASIN_PS1 = Double.longBitsToDouble(0xbfd4d61203eb6f7dL); // -3.25565818622400915405e-01 + private static final double ASIN_PS2 = Double.longBitsToDouble(0x3fc9c1550e884455L); // 2.01212532134862925881e-01 + private static final double ASIN_PS3 = Double.longBitsToDouble(0xbfa48228b5688f3bL); // -4.00555345006794114027e-02 + private static final double ASIN_PS4 = Double.longBitsToDouble(0x3f49efe07501b288L); // 7.91534994289814532176e-04 + private static final double ASIN_PS5 = Double.longBitsToDouble(0x3f023de10dfdf709L); // 3.47933107596021167570e-05 + private static final double ASIN_QS1 = Double.longBitsToDouble(0xc0033a271c8a2d4bL); // -2.40339491173441421878e+00 + private static final double ASIN_QS2 = Double.longBitsToDouble(0x40002ae59c598ac8L); // 2.02094576023350569471e+00 + private static final double ASIN_QS3 = Double.longBitsToDouble(0xbfe6066c1b8d0159L); // -6.88283971605453293030e-01 + private static final double ASIN_QS4 = Double.longBitsToDouble(0x3fb3b8c5b12e9282L); // 7.70381505559019352791e-02 + + // -------------------------------------------------------------------------- + // CONSTANTS AND TABLES FOR ATAN + // -------------------------------------------------------------------------- + + // We use the formula atan(-x) = -atan(x) + // ---> we only have to compute atan(x) on [0,+infinity[. + // For values corresponding to angles not close to +-PI/2, we use look-up tables; + // for values corresponding to angles near +-PI/2, we use code derived from fdlibm. 
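To make the look-up-table-plus-Taylor scheme described in the comments above concrete, here is a minimal sketch (my own illustration under stated assumptions, not code from this file): the function and its derivatives are sampled on a uniform grid, and a value is rebuilt from the nearest table entry plus a short expansion in the small remainder.

    // Sketch only: sinTab/cosTab are assumed to hold StrictMath.sin/cos sampled every `step` radians,
    // and x is assumed to fall inside the tabulated range.
    static double tableSin(double x, double[] sinTab, double[] cosTab, double step) {
        int index = (int) (x / step + 0.5);   // nearest tabulated angle
        double d = x - index * step;          // remainder, |d| <= step / 2
        double s = sinTab[index];
        double c = cosTab[index];
        // sin(a + d) ~= sin(a) + d*cos(a) - d^2/2 * sin(a) - d^3/6 * cos(a)
        return s + d * (c + d * (-s / 2.0 + d * (-c / 6.0)));
    }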
+ + // Supposed to be >= tan(67.7deg), as fdlibm code is supposed to work with values > 2.4375. + private static final double ATAN_MAX_VALUE_FOR_TABS = StrictMath.tan(Math.toRadians(74.0)); + + private static final int ATAN_TABS_SIZE = (1 << getTabSizePower(12)) + 1; + private static final double ATAN_DELTA = ATAN_MAX_VALUE_FOR_TABS / (ATAN_TABS_SIZE - 1); + private static final double ATAN_INDEXER = 1 / ATAN_DELTA; + private static final double[] atanTab = new double[ATAN_TABS_SIZE]; + private static final double[] atanDer1DivF1Tab = new double[ATAN_TABS_SIZE]; + private static final double[] atanDer2DivF2Tab = new double[ATAN_TABS_SIZE]; + private static final double[] atanDer3DivF3Tab = new double[ATAN_TABS_SIZE]; + private static final double[] atanDer4DivF4Tab = new double[ATAN_TABS_SIZE]; + + private static final double ATAN_HI3 = Double.longBitsToDouble(0x3ff921fb54442d18L); // 1.57079632679489655800e+00 atan(inf)hi + private static final double ATAN_LO3 = Double.longBitsToDouble(0x3c91a62633145c07L); // 6.12323399573676603587e-17 atan(inf)lo + private static final double ATAN_AT0 = Double.longBitsToDouble(0x3fd555555555550dL); // 3.33333333333329318027e-01 + private static final double ATAN_AT1 = Double.longBitsToDouble(0xbfc999999998ebc4L); // -1.99999999998764832476e-01 + private static final double ATAN_AT2 = Double.longBitsToDouble(0x3fc24924920083ffL); // 1.42857142725034663711e-01 + private static final double ATAN_AT3 = Double.longBitsToDouble(0xbfbc71c6fe231671L); // -1.11111104054623557880e-01 + private static final double ATAN_AT4 = Double.longBitsToDouble(0x3fb745cdc54c206eL); // 9.09088713343650656196e-02 + private static final double ATAN_AT5 = Double.longBitsToDouble(0xbfb3b0f2af749a6dL); // -7.69187620504482999495e-02 + private static final double ATAN_AT6 = Double.longBitsToDouble(0x3fb10d66a0d03d51L); // 6.66107313738753120669e-02 + private static final double ATAN_AT7 = Double.longBitsToDouble(0xbfadde2d52defd9aL); // -5.83357013379057348645e-02 + private static final double ATAN_AT8 = Double.longBitsToDouble(0x3fa97b4b24760debL); // 4.97687799461593236017e-02 + private static final double ATAN_AT9 = Double.longBitsToDouble(0xbfa2b4442c6a6c2fL); // -3.65315727442169155270e-02 + private static final double ATAN_AT10 = Double.longBitsToDouble(0x3f90ad3ae322da11L); // 1.62858201153657823623e-02 + + // -------------------------------------------------------------------------- + // TABLE FOR POWERS OF TWO + // -------------------------------------------------------------------------- + + private static final double[] twoPowTab = new double[(MAX_DOUBLE_EXPONENT - MIN_DOUBLE_EXPONENT) + 1]; + + // -------------------------------------------------------------------------- + // PUBLIC TREATMENTS + // -------------------------------------------------------------------------- + + /** + * @param angle Angle in radians. + * @return Angle cosine. + */ + public static double cos(double angle) { + angle = Math.abs(angle); + if (angle > SIN_COS_MAX_VALUE_FOR_INT_MODULO) { + // Faster than using normalizeZeroTwoPi. + angle = remainderTwoPi(angle); + if (angle < 0.0) { + angle += 2 * Math.PI; + } + } + // index: possibly outside tables range. + int index = (int) (angle * SIN_COS_INDEXER + 0.5); + double delta = (angle - index * SIN_COS_DELTA_HI) - index * SIN_COS_DELTA_LO; + // Making sure index is within tables range. + // Last value of each table is the same than first, so we ignore it (tabs size minus one) for modulo. 
+ index &= (SIN_COS_TABS_SIZE - 2); // index % (SIN_COS_TABS_SIZE-1) + double indexCos = cosTab[index]; + double indexSin = sinTab[index]; + return indexCos + delta * (-indexSin + delta * (-indexCos * ONE_DIV_F2 + delta * (indexSin * ONE_DIV_F3 + delta * indexCos + * ONE_DIV_F4))); + } + + /** + * @param angle Angle in radians. + * @return Angle sine. + */ + public static double sin(double angle) { + boolean negateResult; + if (angle < 0.0) { + angle = -angle; + negateResult = true; + } else { + negateResult = false; + } + if (angle > SIN_COS_MAX_VALUE_FOR_INT_MODULO) { + // Faster than using normalizeZeroTwoPi. + angle = remainderTwoPi(angle); + if (angle < 0.0) { + angle += 2 * Math.PI; + } + } + int index = (int) (angle * SIN_COS_INDEXER + 0.5); + double delta = (angle - index * SIN_COS_DELTA_HI) - index * SIN_COS_DELTA_LO; + index &= (SIN_COS_TABS_SIZE - 2); // index % (SIN_COS_TABS_SIZE-1) + double indexSin = sinTab[index]; + double indexCos = cosTab[index]; + double result = indexSin + delta * (indexCos + delta * (-indexSin * ONE_DIV_F2 + delta * (-indexCos * ONE_DIV_F3 + delta * indexSin + * ONE_DIV_F4))); + return negateResult ? -result : result; + } + + /** + * @param angle Angle in radians. + * @return Angle tangent. + */ + public static double tan(double angle) { + if (Math.abs(angle) > TAN_MAX_VALUE_FOR_INT_MODULO) { + // Faster than using normalizeMinusHalfPiHalfPi. + angle = remainderTwoPi(angle); + if (angle < -Math.PI / 2) { + angle += Math.PI; + } else if (angle > Math.PI / 2) { + angle -= Math.PI; + } + } + boolean negateResult; + if (angle < 0.0) { + angle = -angle; + negateResult = true; + } else { + negateResult = false; + } + int index = (int) (angle * TAN_INDEXER + 0.5); + double delta = (angle - index * TAN_DELTA_HI) - index * TAN_DELTA_LO; + // index modulo PI, i.e. 2*(virtual tab size minus one). + index &= (2 * (TAN_VIRTUAL_TABS_SIZE - 1) - 1); // index % (2*(TAN_VIRTUAL_TABS_SIZE-1)) + // Here, index is in [0,2*(TAN_VIRTUAL_TABS_SIZE-1)-1], i.e. indicates an angle in [0,PI[. + if (index > (TAN_VIRTUAL_TABS_SIZE - 1)) { + index = (2 * (TAN_VIRTUAL_TABS_SIZE - 1)) - index; + delta = -delta; + negateResult = negateResult == false; + } + double result; + if (index < TAN_TABS_SIZE) { + result = tanTab[index] + delta * (tanDer1DivF1Tab[index] + delta * (tanDer2DivF2Tab[index] + delta * (tanDer3DivF3Tab[index] + + delta * tanDer4DivF4Tab[index]))); + } else { // angle in ]TAN_MAX_VALUE_FOR_TABS,TAN_MAX_VALUE_FOR_INT_MODULO], or angle is NaN + // Using tan(angle) == 1/tan(PI/2-angle) formula: changing angle (index and delta), and inverting. + index = (TAN_VIRTUAL_TABS_SIZE - 1) - index; + result = 1 / (tanTab[index] - delta * (tanDer1DivF1Tab[index] - delta * (tanDer2DivF2Tab[index] - delta + * (tanDer3DivF3Tab[index] - delta * tanDer4DivF4Tab[index])))); + } + return negateResult ? -result : result; + } + + /** + * @param value Value in [-1,1]. + * @return Value arccosine, in radians, in [0,PI]. + */ + public static double acos(double value) { + return Math.PI / 2 - FastMath.asin(value); + } + + /** + * @param value Value in [-1,1]. + * @return Value arcsine, in radians, in [-PI/2,PI/2]. 
+ */ + public static double asin(double value) { + boolean negateResult; + if (value < 0.0) { + value = -value; + negateResult = true; + } else { + negateResult = false; + } + if (value <= ASIN_MAX_VALUE_FOR_TABS) { + int index = (int) (value * ASIN_INDEXER + 0.5); + double delta = value - index * ASIN_DELTA; + double result = asinTab[index] + delta * (asinDer1DivF1Tab[index] + delta * (asinDer2DivF2Tab[index] + delta + * (asinDer3DivF3Tab[index] + delta * asinDer4DivF4Tab[index]))); + return negateResult ? -result : result; + } else if (value <= ASIN_MAX_VALUE_FOR_POWTABS) { + int index = (int) (FastMath.powFast(value * ASIN_POWTABS_ONE_DIV_MAX_VALUE, ASIN_POWTABS_POWER) * ASIN_POWTABS_SIZE_MINUS_ONE + + 0.5); + double delta = value - asinParamPowTab[index]; + double result = asinPowTab[index] + delta * (asinDer1DivF1PowTab[index] + delta * (asinDer2DivF2PowTab[index] + delta + * (asinDer3DivF3PowTab[index] + delta * asinDer4DivF4PowTab[index]))); + return negateResult ? -result : result; + } else { // value > ASIN_MAX_VALUE_FOR_TABS, or value is NaN + // This part is derived from fdlibm. + if (value < 1.0) { + double t = (1.0 - value) * 0.5; + double p = t * (ASIN_PS0 + t * (ASIN_PS1 + t * (ASIN_PS2 + t * (ASIN_PS3 + t * (ASIN_PS4 + t * ASIN_PS5))))); + double q = 1.0 + t * (ASIN_QS1 + t * (ASIN_QS2 + t * (ASIN_QS3 + t * ASIN_QS4))); + double s = Math.sqrt(t); + double z = s + s * (p / q); + double result = ASIN_PIO2_HI - ((z + z) - ASIN_PIO2_LO); + return negateResult ? -result : result; + } else { // value >= 1.0, or value is NaN + if (value == 1.0) { + return negateResult ? -Math.PI / 2 : Math.PI / 2; + } else { + return Double.NaN; + } + } + } + } + + /** + * @param value A double value. + * @return Value arctangent, in radians, in [-PI/2,PI/2]. + */ + public static double atan(double value) { + boolean negateResult; + if (value < 0.0) { + value = -value; + negateResult = true; + } else { + negateResult = false; + } + if (value == 1.0) { + // We want "exact" result for 1.0. + return negateResult ? -Math.PI / 4 : Math.PI / 4; + } else if (value <= ATAN_MAX_VALUE_FOR_TABS) { + int index = (int) (value * ATAN_INDEXER + 0.5); + double delta = value - index * ATAN_DELTA; + double result = atanTab[index] + delta * (atanDer1DivF1Tab[index] + delta * (atanDer2DivF2Tab[index] + delta + * (atanDer3DivF3Tab[index] + delta * atanDer4DivF4Tab[index]))); + return negateResult ? -result : result; + } else { // value > ATAN_MAX_VALUE_FOR_TABS, or value is NaN + // This part is derived from fdlibm. + if (value < TWO_POW_66) { + double x = -1 / value; + double x2 = x * x; + double x4 = x2 * x2; + double s1 = x2 * (ATAN_AT0 + x4 * (ATAN_AT2 + x4 * (ATAN_AT4 + x4 * (ATAN_AT6 + x4 * (ATAN_AT8 + x4 * ATAN_AT10))))); + double s2 = x4 * (ATAN_AT1 + x4 * (ATAN_AT3 + x4 * (ATAN_AT5 + x4 * (ATAN_AT7 + x4 * ATAN_AT9)))); + double result = ATAN_HI3 - ((x * (s1 + s2) - ATAN_LO3) - x); + return negateResult ? -result : result; + } else { // value >= 2^66, or value is NaN + if (Double.isNaN(value)) { + return Double.NaN; + } else { + return negateResult ? -Math.PI / 2 : Math.PI / 2; + } + } + } + } + + /** + * For special values for which multiple conventions could be adopted, behaves like Math.atan2(double,double). + * + * @param y Coordinate on y axis. + * @param x Coordinate on x axis. + * @return Angle from x axis positive side to (x,y) position, in radians, in [-PI,PI]. + * Angle measure is positive when going from x axis to y axis (positive sides). 
+ */ + public static double atan2(double y, double x) { + if (x > 0.0) { + if (y == 0.0) { + return (1 / y == Double.NEGATIVE_INFINITY) ? -0.0 : 0.0; + } + if (x == Double.POSITIVE_INFINITY) { + if (y == Double.POSITIVE_INFINITY) { + return Math.PI / 4; + } else if (y == Double.NEGATIVE_INFINITY) { + return -Math.PI / 4; + } else if (y > 0.0) { + return 0.0; + } else if (y < 0.0) { + return -0.0; + } else { + return Double.NaN; + } + } else { + return FastMath.atan(y / x); + } + } else if (x < 0.0) { + if (y == 0.0) { + return (1 / y == Double.NEGATIVE_INFINITY) ? -Math.PI : Math.PI; + } + if (x == Double.NEGATIVE_INFINITY) { + if (y == Double.POSITIVE_INFINITY) { + return 3 * Math.PI / 4; + } else if (y == Double.NEGATIVE_INFINITY) { + return -3 * Math.PI / 4; + } else if (y > 0.0) { + return Math.PI; + } else if (y < 0.0) { + return -Math.PI; + } else { + return Double.NaN; + } + } else if (y > 0.0) { + return Math.PI / 2 + FastMath.atan(-x / y); + } else if (y < 0.0) { + return -Math.PI / 2 - FastMath.atan(x / y); + } else { + return Double.NaN; + } + } else if (x == 0.0) { + if (y == 0.0) { + if (1 / x == Double.NEGATIVE_INFINITY) { + return (1 / y == Double.NEGATIVE_INFINITY) ? -Math.PI : Math.PI; + } else { + return (1 / y == Double.NEGATIVE_INFINITY) ? -0.0 : 0.0; + } + } + if (y > 0.0) { + return Math.PI / 2; + } else if (y < 0.0) { + return -Math.PI / 2; + } else { + return Double.NaN; + } + } else { + return Double.NaN; + } + } + + /** + * This treatment is somehow accurate for low values of |power|, + * and for |power*getExponent(value)| < 1023 or so (to stay away + * from double extreme magnitudes (large and small)). + * + * @param value A double value. + * @param power A power. + * @return value^power. + */ + private static double powFast(double value, int power) { + if (power > 5) { // Most common case first. + double oddRemains = 1.0; + do { + // Test if power is odd. + if ((power & 1) != 0) { + oddRemains *= value; + } + value *= value; + power >>= 1; // power = power / 2 + } while (power > 5); + // Here, power is in [3,5]: faster to finish outside the loop. + if (power == 3) { + return oddRemains * value * value * value; + } else { + double v2 = value * value; + if (power == 4) { + return oddRemains * v2 * v2; + } else { // power == 5 + return oddRemains * v2 * v2 * value; + } + } + } else if (power >= 0) { // power in [0,5] + if (power < 3) { // power in [0,2] + if (power == 2) { // Most common case first. + return value * value; + } else if (power != 0) { // faster than == 1 + return value; + } else { // power == 0 + return 1.0; + } + } else { // power in [3,5] + if (power == 3) { + return value * value * value; + } else { // power in [4,5] + double v2 = value * value; + if (power == 4) { + return v2 * v2; + } else { // power == 5 + return v2 * v2 * value; + } + } + } + } else { // power < 0 + // Opposite of Integer.MIN_VALUE does not exist as int. + if (power == Integer.MIN_VALUE) { + // Integer.MAX_VALUE = -(power+1) + return 1.0 / (FastMath.powFast(value, Integer.MAX_VALUE) * value); + } else { + return 1.0 / FastMath.powFast(value, -power); + } + } + } + + // -------------------------------------------------------------------------- + // PRIVATE TREATMENTS + // -------------------------------------------------------------------------- + + /** + * FastMath is non-instantiable. + */ + private FastMath() {} + + /** + * Use look-up tables size power through this method, + * to make sure is it small in case java.lang.Math + * is directly used. 
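A hypothetical spot check of the public entry points above against java.lang.Math (not part of the change; the 1e-12 tolerance is an assumption chosen loosely for illustration):

    for (double a : new double[] { 0.0, 0.5, Math.PI / 3, 10_000.0 }) {
        assert Math.abs(FastMath.cos(a) - Math.cos(a)) < 1e-12;
        assert Math.abs(FastMath.sin(a) - Math.sin(a)) < 1e-12;
        assert Math.abs(FastMath.atan(a) - Math.atan(a)) < 1e-12;
    }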
+ */ + private static int getTabSizePower(int tabSizePower) { + return tabSizePower; + } + + /** + * Remainder using an accurate definition of PI. + * Derived from a fdlibm treatment called __ieee754_rem_pio2. + * + * This method can return values slightly (like one ULP or so) outside [-Math.PI,Math.PI] range. + * + * @param angle Angle in radians. + * @return Remainder of (angle % (2*PI)), which is in [-PI,PI] range. + */ + private static double remainderTwoPi(double angle) { + boolean negateResult; + if (angle < 0.0) { + negateResult = true; + angle = -angle; + } else { + negateResult = false; + } + if (angle <= NORMALIZE_ANGLE_MAX_MEDIUM_DOUBLE) { + double fn = (double) (int) (angle * INVTWOPI + 0.5); + double result = (angle - fn * TWOPI_HI) - fn * TWOPI_LO; + return negateResult ? -result : result; + } else if (angle < Double.POSITIVE_INFINITY) { + // Reworking exponent to have a value < 2^24. + long lx = Double.doubleToRawLongBits(angle); + long exp = ((lx >> 52) & 0x7FF) - 1046; + double z = Double.longBitsToDouble(lx - (exp << 52)); + + double x0 = (double) ((int) z); + z = (z - x0) * TWO_POW_24; + double x1 = (double) ((int) z); + double x2 = (z - x1) * TWO_POW_24; + + double result = subRemainderTwoPi(x0, x1, x2, (int) exp, (x2 == 0) ? 2 : 3); + return negateResult ? -result : result; + } else { // angle is +infinity or NaN + return Double.NaN; + } + } + + /** + * Remainder using an accurate definition of PI. + * Derived from a fdlibm treatment called __kernel_rem_pio2. + * + * @param x0 Most significant part of the value, as an integer < 2^24, in double precision format. Must be >= 0. + * @param x1 Following significant part of the value, as an integer < 2^24, in double precision format. + * @param x2 Least significant part of the value, as an integer < 2^24, in double precision format. + * @param e0 Exponent of x0 (value is (2^e0)*(x0+(2^-24)*(x1+(2^-24)*x2))). Must be ≥ -20. + * @param nx Number of significant parts to take into account. Must be 2 or 3. + * @return Remainder of (value % (2*PI)), which is in [-PI,PI] range. + */ + private static double subRemainderTwoPi(double x0, double x1, double x2, int e0, int nx) { + int ih; + double z, fw; + double f0, f1, f2, f3, f4, f5, f6 = 0.0, f7; + double q0, q1, q2, q3, q4, q5; + int iq0, iq1, iq2, iq3, iq4; + + final int jx = nx - 1; // jx in [1,2] (nx in [2,3]) + // Could use a table to avoid division, but the gain isn't worth it most likely... + final int jv = (e0 - 3) / 24; // We do not handle the case (e0-3 < -23). + int q = e0 - ((jv << 4) + (jv << 3)) - 24; // e0-24*(jv+1) + + final int j = jv + 4; + if (jx == 1) { + f5 = (j >= 0) ? ONE_OVER_TWOPI_TAB[j] : 0.0; + f4 = (j >= 1) ? ONE_OVER_TWOPI_TAB[j - 1] : 0.0; + f3 = (j >= 2) ? ONE_OVER_TWOPI_TAB[j - 2] : 0.0; + f2 = (j >= 3) ? ONE_OVER_TWOPI_TAB[j - 3] : 0.0; + f1 = (j >= 4) ? ONE_OVER_TWOPI_TAB[j - 4] : 0.0; + f0 = (j >= 5) ? ONE_OVER_TWOPI_TAB[j - 5] : 0.0; + + q0 = x0 * f1 + x1 * f0; + q1 = x0 * f2 + x1 * f1; + q2 = x0 * f3 + x1 * f2; + q3 = x0 * f4 + x1 * f3; + q4 = x0 * f5 + x1 * f4; + } else { // jx == 2 + f6 = (j >= 0) ? ONE_OVER_TWOPI_TAB[j] : 0.0; + f5 = (j >= 1) ? ONE_OVER_TWOPI_TAB[j - 1] : 0.0; + f4 = (j >= 2) ? ONE_OVER_TWOPI_TAB[j - 2] : 0.0; + f3 = (j >= 3) ? ONE_OVER_TWOPI_TAB[j - 3] : 0.0; + f2 = (j >= 4) ? ONE_OVER_TWOPI_TAB[j - 4] : 0.0; + f1 = (j >= 5) ? ONE_OVER_TWOPI_TAB[j - 5] : 0.0; + f0 = (j >= 6) ? 
ONE_OVER_TWOPI_TAB[j - 6] : 0.0; + + q0 = x0 * f2 + x1 * f1 + x2 * f0; + q1 = x0 * f3 + x1 * f2 + x2 * f1; + q2 = x0 * f4 + x1 * f3 + x2 * f2; + q3 = x0 * f5 + x1 * f4 + x2 * f3; + q4 = x0 * f6 + x1 * f5 + x2 * f4; + } + + z = q4; + fw = (double) ((int) (TWO_POW_N24 * z)); + iq0 = (int) (z - TWO_POW_24 * fw); + z = q3 + fw; + fw = (double) ((int) (TWO_POW_N24 * z)); + iq1 = (int) (z - TWO_POW_24 * fw); + z = q2 + fw; + fw = (double) ((int) (TWO_POW_N24 * z)); + iq2 = (int) (z - TWO_POW_24 * fw); + z = q1 + fw; + fw = (double) ((int) (TWO_POW_N24 * z)); + iq3 = (int) (z - TWO_POW_24 * fw); + z = q0 + fw; + + // Here, q is in [-25,2] range or so, so we can use the table right away. + double twoPowQ = twoPowTab[q - MIN_DOUBLE_EXPONENT]; + + z = (z * twoPowQ) % 8.0; + z -= (double) ((int) z); + if (q > 0) { + iq3 &= 0xFFFFFF >> q; + ih = iq3 >> (23 - q); + } else if (q == 0) { + ih = iq3 >> 23; + } else if (z >= 0.5) { + ih = 2; + } else { + ih = 0; + } + if (ih > 0) { + int carry; + if (iq0 != 0) { + carry = 1; + iq0 = 0x1000000 - iq0; + iq1 = 0x0FFFFFF - iq1; + iq2 = 0x0FFFFFF - iq2; + iq3 = 0x0FFFFFF - iq3; + } else { + if (iq1 != 0) { + carry = 1; + iq1 = 0x1000000 - iq1; + iq2 = 0x0FFFFFF - iq2; + iq3 = 0x0FFFFFF - iq3; + } else { + if (iq2 != 0) { + carry = 1; + iq2 = 0x1000000 - iq2; + iq3 = 0x0FFFFFF - iq3; + } else { + if (iq3 != 0) { + carry = 1; + iq3 = 0x1000000 - iq3; + } else { + carry = 0; + } + } + } + } + if (q > 0) { + switch (q) { + case 1 -> iq3 &= 0x7FFFFF; + case 2 -> iq3 &= 0x3FFFFF; + } + } + if (ih == 2) { + z = 1.0 - z; + if (carry != 0) { + z -= twoPowQ; + } + } + } + + if (z == 0.0) { + if (jx == 1) { + f6 = ONE_OVER_TWOPI_TAB[jv + 5]; + q5 = x0 * f6 + x1 * f5; + } else { // jx == 2 + f7 = ONE_OVER_TWOPI_TAB[jv + 5]; + q5 = x0 * f7 + x1 * f6 + x2 * f5; + } + + z = q5; + fw = (double) ((int) (TWO_POW_N24 * z)); + iq0 = (int) (z - TWO_POW_24 * fw); + z = q4 + fw; + fw = (double) ((int) (TWO_POW_N24 * z)); + iq1 = (int) (z - TWO_POW_24 * fw); + z = q3 + fw; + fw = (double) ((int) (TWO_POW_N24 * z)); + iq2 = (int) (z - TWO_POW_24 * fw); + z = q2 + fw; + fw = (double) ((int) (TWO_POW_N24 * z)); + iq3 = (int) (z - TWO_POW_24 * fw); + z = q1 + fw; + fw = (double) ((int) (TWO_POW_N24 * z)); + iq4 = (int) (z - TWO_POW_24 * fw); + z = q0 + fw; + + z = (z * twoPowQ) % 8.0; + z -= (double) ((int) z); + if (q > 0) { + // some parentheses for Eclipse formatter's weaknesses with bits shifts + iq4 &= (0xFFFFFF >> q); + ih = (iq4 >> (23 - q)); + } else if (q == 0) { + ih = iq4 >> 23; + } else if (z >= 0.5) { + ih = 2; + } else { + ih = 0; + } + if (ih > 0) { + if (iq0 != 0) { + iq0 = 0x1000000 - iq0; + iq1 = 0x0FFFFFF - iq1; + iq2 = 0x0FFFFFF - iq2; + iq3 = 0x0FFFFFF - iq3; + iq4 = 0x0FFFFFF - iq4; + } else { + if (iq1 != 0) { + iq1 = 0x1000000 - iq1; + iq2 = 0x0FFFFFF - iq2; + iq3 = 0x0FFFFFF - iq3; + iq4 = 0x0FFFFFF - iq4; + } else { + if (iq2 != 0) { + iq2 = 0x1000000 - iq2; + iq3 = 0x0FFFFFF - iq3; + iq4 = 0x0FFFFFF - iq4; + } else { + if (iq3 != 0) { + iq3 = 0x1000000 - iq3; + iq4 = 0x0FFFFFF - iq4; + } else { + if (iq4 != 0) { + iq4 = 0x1000000 - iq4; + } + } + } + } + } + if (q > 0) { + switch (q) { + case 1 -> iq4 &= 0x7FFFFF; + case 2 -> iq4 &= 0x3FFFFF; + } + } + } + fw = twoPowQ * TWO_POW_N24; // q -= 24, so initializing fw with ((2^q)*(2^-24)=2^(q-24)) + } else { + // Here, q is in [-25,-2] range or so, so we could use twoPow's table right away with + // iq4 = (int)(z*twoPowTab[-q-TWO_POW_TAB_MIN_POW]); + // but tests show using division is faster... 
+ iq4 = (int) (z / twoPowQ); + fw = twoPowQ; + } + + q4 = fw * (double) iq4; + fw *= TWO_POW_N24; + q3 = fw * (double) iq3; + fw *= TWO_POW_N24; + q2 = fw * (double) iq2; + fw *= TWO_POW_N24; + q1 = fw * (double) iq1; + fw *= TWO_POW_N24; + q0 = fw * (double) iq0; + fw *= TWO_POW_N24; + + fw = TWOPI_TAB0 * q4; + fw += TWOPI_TAB0 * q3 + TWOPI_TAB1 * q4; + fw += TWOPI_TAB0 * q2 + TWOPI_TAB1 * q3 + TWOPI_TAB2 * q4; + fw += TWOPI_TAB0 * q1 + TWOPI_TAB1 * q2 + TWOPI_TAB2 * q3 + TWOPI_TAB3 * q4; + fw += TWOPI_TAB0 * q0 + TWOPI_TAB1 * q1 + TWOPI_TAB2 * q2 + TWOPI_TAB3 * q3 + TWOPI_TAB4 * q4; + + return (ih == 0) ? fw : -fw; + } + + // -------------------------------------------------------------------------- + // STATIC INITIALIZATIONS + // -------------------------------------------------------------------------- + + /** + * Initializes look-up tables. + * + * Might use some FastMath methods in there, not to spend + * an hour in it, but must take care not to use methods + * which look-up tables have not yet been initialized, + * or that are not accurate enough. + */ + static { + + // sin and cos + + final int SIN_COS_PI_INDEX = (SIN_COS_TABS_SIZE - 1) / 2; + final int SIN_COS_PI_MUL_2_INDEX = 2 * SIN_COS_PI_INDEX; + final int SIN_COS_PI_MUL_0_5_INDEX = SIN_COS_PI_INDEX / 2; + final int SIN_COS_PI_MUL_1_5_INDEX = 3 * SIN_COS_PI_INDEX / 2; + for (int i = 0; i < SIN_COS_TABS_SIZE; i++) { + // angle: in [0,2*PI]. + double angle = i * SIN_COS_DELTA_HI + i * SIN_COS_DELTA_LO; + double sinAngle = StrictMath.sin(angle); + double cosAngle = StrictMath.cos(angle); + // For indexes corresponding to null cosine or sine, we make sure the value is zero + // and not an epsilon. This allows for a much better accuracy for results close to zero. + if (i == SIN_COS_PI_INDEX) { + sinAngle = 0.0; + } else if (i == SIN_COS_PI_MUL_2_INDEX) { + sinAngle = 0.0; + } else if (i == SIN_COS_PI_MUL_0_5_INDEX) { + cosAngle = 0.0; + } else if (i == SIN_COS_PI_MUL_1_5_INDEX) { + cosAngle = 0.0; + } + sinTab[i] = sinAngle; + cosTab[i] = cosAngle; + } + + // tan + + for (int i = 0; i < TAN_TABS_SIZE; i++) { + // angle: in [0,TAN_MAX_VALUE_FOR_TABS]. + double angle = i * TAN_DELTA_HI + i * TAN_DELTA_LO; + tanTab[i] = StrictMath.tan(angle); + double cosAngle = StrictMath.cos(angle); + double sinAngle = StrictMath.sin(angle); + double cosAngleInv = 1 / cosAngle; + double cosAngleInv2 = cosAngleInv * cosAngleInv; + double cosAngleInv3 = cosAngleInv2 * cosAngleInv; + double cosAngleInv4 = cosAngleInv2 * cosAngleInv2; + double cosAngleInv5 = cosAngleInv3 * cosAngleInv2; + tanDer1DivF1Tab[i] = cosAngleInv2; + tanDer2DivF2Tab[i] = ((2 * sinAngle) * cosAngleInv3) * ONE_DIV_F2; + tanDer3DivF3Tab[i] = ((2 * (1 + 2 * sinAngle * sinAngle)) * cosAngleInv4) * ONE_DIV_F3; + tanDer4DivF4Tab[i] = ((8 * sinAngle * (2 + sinAngle * sinAngle)) * cosAngleInv5) * ONE_DIV_F4; + } + + // asin + + for (int i = 0; i < ASIN_TABS_SIZE; i++) { + // x: in [0,ASIN_MAX_VALUE_FOR_TABS]. 
+ double x = i * ASIN_DELTA; + asinTab[i] = StrictMath.asin(x); + double oneMinusXSqInv = 1.0 / (1 - x * x); + double oneMinusXSqInv0_5 = StrictMath.sqrt(oneMinusXSqInv); + double oneMinusXSqInv1_5 = oneMinusXSqInv0_5 * oneMinusXSqInv; + double oneMinusXSqInv2_5 = oneMinusXSqInv1_5 * oneMinusXSqInv; + double oneMinusXSqInv3_5 = oneMinusXSqInv2_5 * oneMinusXSqInv; + asinDer1DivF1Tab[i] = oneMinusXSqInv0_5; + asinDer2DivF2Tab[i] = (x * oneMinusXSqInv1_5) * ONE_DIV_F2; + asinDer3DivF3Tab[i] = ((1 + 2 * x * x) * oneMinusXSqInv2_5) * ONE_DIV_F3; + asinDer4DivF4Tab[i] = ((5 + 2 * x * (2 + x * (5 - 2 * x))) * oneMinusXSqInv3_5) * ONE_DIV_F4; + } + + for (int i = 0; i < ASIN_POWTABS_SIZE; i++) { + // x: in [0,ASIN_MAX_VALUE_FOR_POWTABS]. + double x = StrictMath.pow(i * (1.0 / ASIN_POWTABS_SIZE_MINUS_ONE), 1.0 / ASIN_POWTABS_POWER) * ASIN_MAX_VALUE_FOR_POWTABS; + asinParamPowTab[i] = x; + asinPowTab[i] = StrictMath.asin(x); + double oneMinusXSqInv = 1.0 / (1 - x * x); + double oneMinusXSqInv0_5 = StrictMath.sqrt(oneMinusXSqInv); + double oneMinusXSqInv1_5 = oneMinusXSqInv0_5 * oneMinusXSqInv; + double oneMinusXSqInv2_5 = oneMinusXSqInv1_5 * oneMinusXSqInv; + double oneMinusXSqInv3_5 = oneMinusXSqInv2_5 * oneMinusXSqInv; + asinDer1DivF1PowTab[i] = oneMinusXSqInv0_5; + asinDer2DivF2PowTab[i] = (x * oneMinusXSqInv1_5) * ONE_DIV_F2; + asinDer3DivF3PowTab[i] = ((1 + 2 * x * x) * oneMinusXSqInv2_5) * ONE_DIV_F3; + asinDer4DivF4PowTab[i] = ((5 + 2 * x * (2 + x * (5 - 2 * x))) * oneMinusXSqInv3_5) * ONE_DIV_F4; + } + + // atan + + for (int i = 0; i < ATAN_TABS_SIZE; i++) { + // x: in [0,ATAN_MAX_VALUE_FOR_TABS]. + double x = i * ATAN_DELTA; + double onePlusXSqInv = 1.0 / (1 + x * x); + double onePlusXSqInv2 = onePlusXSqInv * onePlusXSqInv; + double onePlusXSqInv3 = onePlusXSqInv2 * onePlusXSqInv; + double onePlusXSqInv4 = onePlusXSqInv2 * onePlusXSqInv2; + atanTab[i] = StrictMath.atan(x); + atanDer1DivF1Tab[i] = onePlusXSqInv; + atanDer2DivF2Tab[i] = (-2 * x * onePlusXSqInv2) * ONE_DIV_F2; + atanDer3DivF3Tab[i] = ((-2 + 6 * x * x) * onePlusXSqInv3) * ONE_DIV_F3; + atanDer4DivF4Tab[i] = ((24 * x * (1 - x * x)) * onePlusXSqInv4) * ONE_DIV_F4; + } + + // twoPow + + for (int i = MIN_DOUBLE_EXPONENT; i <= MAX_DOUBLE_EXPONENT; i++) { + twoPowTab[i - MIN_DOUBLE_EXPONENT] = StrictMath.pow(2.0, i); + } + } +} diff --git a/libs/h3/src/main/java/org/elasticsearch/h3/H3.java b/libs/h3/src/main/java/org/elasticsearch/h3/H3.java index a27799855239..46bcc3f141dd 100644 --- a/libs/h3/src/main/java/org/elasticsearch/h3/H3.java +++ b/libs/h3/src/main/java/org/elasticsearch/h3/H3.java @@ -33,6 +33,15 @@ public final class H3 { public static int MAX_H3_RES = Constants.MAX_H3_RES; + private static final long[] NORTH = new long[MAX_H3_RES + 1]; + private static final long[] SOUTH = new long[MAX_H3_RES + 1]; + static { + for (int res = 0; res <= H3.MAX_H3_RES; res++) { + NORTH[res] = H3.geoToH3(90, 0, res); + SOUTH[res] = H3.geoToH3(-90, 0, res); + } + } + /** * Converts from long representation of an index to String representation. */ @@ -229,13 +238,9 @@ public static String h3ToParent(String h3Address) { * Returns the children of the given index. 
*/ public static long[] h3ToChildren(long h3) { - long[] children = new long[cellToChildrenSize(h3)]; - int res = H3Index.H3_get_resolution(h3); - Iterator.IterCellsChildren it = Iterator.iterInitParent(h3, res + 1); - int pos = 0; - while (it.h != Iterator.H3_NULL) { - children[pos++] = it.h; - Iterator.iterStepChild(it); + final long[] children = new long[h3ToChildrenSize(h3)]; + for (int i = 0; i < children.length; i++) { + children[i] = childPosToH3(h3, i); } return children; } @@ -248,6 +253,96 @@ public static String[] h3ToChildren(String h3Address) { return h3ToStringList(h3ToChildren(stringToH3(h3Address))); } + /** + * Returns the child cell at the given position + */ + public static long childPosToH3(long h3, int childPos) { + final int childrenRes = H3Index.H3_get_resolution(h3) + 1; + if (childrenRes > MAX_H3_RES) { + throw new IllegalArgumentException("Resolution overflow"); + } + final long childH = H3Index.H3_set_resolution(h3, childrenRes); + if (childPos == 0) { + return H3Index.H3_set_index_digit(childH, childrenRes, CoordIJK.Direction.CENTER_DIGIT.digit()); + } + final boolean isPentagon = isPentagon(h3); + final int maxPos = isPentagon ? 5 : 6; + if (childPos < 0 || childPos > maxPos) { + throw new IllegalArgumentException("invalid child position"); + } + if (isPentagon) { + // Pentagon skip digit (position) is the number 1, therefore we add one + // to the current position. + return H3Index.H3_set_index_digit(childH, childrenRes, childPos + 1); + } else { + return H3Index.H3_set_index_digit(childH, childrenRes, childPos); + } + } + + /** + * Returns the child address at the given position + */ + public static String childPosToH3(String h3Address, int childPos) { + return h3ToString(childPosToH3(stringToH3(h3Address), childPos)); + } + + private static final int[] PEN_INTERSECTING_CHILDREN_DIRECTIONS = new int[] { 3, 1, 6, 4, 2 }; + private static final int[] HEX_INTERSECTING_CHILDREN_DIRECTIONS = new int[] { 3, 6, 2, 5, 1, 4 }; + + /** + * Returns the h3 bins on the level below which are not children of the given H3 index but + * intersects with it. + */ + public static long[] h3ToNoChildrenIntersecting(long h3) { + final boolean isPentagon = isPentagon(h3); + final long[] noChildren = new long[isPentagon ? 5 : 6]; + for (int i = 0; i < noChildren.length; i++) { + noChildren[i] = noChildIntersectingPosToH3(h3, i); + } + return noChildren; + } + + /** + * Returns the h3 addresses on the level below which are not children of the given H3 address but + * intersects with it. + */ + public static String[] h3ToNoChildrenIntersecting(String h3Address) { + return h3ToStringList(h3ToNoChildrenIntersecting(stringToH3(h3Address))); + } + + /** + * Returns the no child intersecting cell at the given position + */ + public static long noChildIntersectingPosToH3(long h3, int childPos) { + final int childrenRes = H3Index.H3_get_resolution(h3) + 1; + if (childrenRes > MAX_H3_RES) { + throw new IllegalArgumentException("Resolution overflow"); + } + final boolean isPentagon = isPentagon(h3); + final int maxPos = isPentagon ? 4 : 5; + if (childPos < 0 || childPos > maxPos) { + throw new IllegalArgumentException("invalid child position"); + } + final long childH = H3Index.H3_set_resolution(h3, childrenRes); + if (isPentagon) { + // Pentagon skip digit (position) is the number 1, therefore we add one + // for the skip digit and one for the 0 (center) digit. 
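As a usage sketch for the position-based child accessor introduced above (illustrative only; geoToH3 and isPentagon are assumed to be the existing public helpers of this class):

    long h3 = H3.geoToH3(40.0, -70.0, 5);            // some resolution-5 cell
    int numChildren = H3.isPentagon(h3) ? 6 : 7;     // positions 0..5 for pentagons, 0..6 for hexagons
    for (int pos = 0; pos < numChildren; pos++) {
        long child = H3.childPosToH3(h3, pos);       // resolution-6 child at position `pos`,
    }                                                // enumerated without materialising the full array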
+ final long child = H3Index.H3_set_index_digit(childH, childrenRes, childPos + 2); + return HexRing.h3NeighborInDirection(child, PEN_INTERSECTING_CHILDREN_DIRECTIONS[childPos]); + } else { + // we add one for the 0 (center) digit. + final long child = H3Index.H3_set_index_digit(childH, childrenRes, childPos + 1); + return HexRing.h3NeighborInDirection(child, HEX_INTERSECTING_CHILDREN_DIRECTIONS[childPos]); + } + } + + /** + * Returns the no child intersecting cell at the given position + */ + public static String noChildIntersectingPosToH3(String h3Address, int childPos) { + return h3ToString(noChildIntersectingPosToH3(stringToH3(h3Address), childPos)); + } + /** * Returns the neighbor indexes. * @@ -265,7 +360,59 @@ public static String[] hexRing(String h3Address) { * @return All neighbor indexes from the origin */ public static long[] hexRing(long h3) { - return HexRing.hexRing(h3); + final long[] ring = new long[hexRingSize(h3)]; + for (int i = 0; i < ring.length; i++) { + ring[i] = hexRingPosToH3(h3, i); + assert ring[i] >= 0; + } + return ring; + } + + /** + * Returns the number of neighbor indexes. + * + * @param h3 Origin index + * @return the number of neighbor indexes from the origin + */ + public static int hexRingSize(long h3) { + return H3Index.H3_is_pentagon(h3) ? 5 : 6; + } + + /** + * Returns the number of neighbor indexes. + * + * @param h3Address Origin index + * @return the number of neighbor indexes from the origin + */ + public static int hexRingSize(String h3Address) { + return hexRingSize(stringToH3(h3Address)); + } + + /** + * Returns the neighbor index at the given position. + * + * @param h3 Origin index + * @param ringPos position of the neighbour index + * @return the actual neighbour at the given position + */ + public static long hexRingPosToH3(long h3, int ringPos) { + // for pentagons, we skip direction at position 2 + final int pos = H3Index.H3_is_pentagon(h3) && ringPos >= 2 ? ringPos + 1 : ringPos; + if (pos < 0 || pos > 5) { + throw new IllegalArgumentException("invalid ring position"); + } + return HexRing.h3NeighborInDirection(h3, HexRing.DIRECTIONS[pos].digit()); + } + + /** + * Returns the neighbor index at the given position. + * + * @param h3Address Origin index + * @param ringPos position of the neighbour index + * @return the actual neighbour at the given position + */ + public static String hexRingPosToH3(String h3Address, int ringPos) { + return h3ToString(hexRingPosToH3(stringToH3(h3Address), ringPos)); } /** @@ -291,23 +438,140 @@ public static boolean areNeighborCells(long origin, long destination) { } /** - * cellToChildrenSize returns the exact number of children for a cell at a + * h3ToChildrenSize returns the exact number of children for a cell at a * given child resolution. 
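As a quick sanity check of the closed forms implemented just below: with n resolution steps between parent and child, a hexagon has 7^n descendants while a pentagon has 1 + 5 * (7^n - 1) / 6, i.e. one pentagon on the center chain plus five hexagon subtrees added per level. Three levels down this gives 7^3 = 343 cells for a hexagon parent and 1 + 5 * 342 / 6 = 286 for a pentagon parent.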
* - * @param h H3Index to find the number of children of + * @param h3 H3Index to find the number of children of + * @param childRes The child resolution you're interested in * - * @return int Exact number of children (handles hexagons and pentagons + * @return long Exact number of children (handles hexagons and pentagons * correctly) */ - private static int cellToChildrenSize(long h) { - int n = 1; - if (H3Index.H3_is_pentagon(h)) { - return (1 + 5 * (_ipow(7, n) - 1) / 6); + public static long h3ToChildrenSize(long h3, int childRes) { + final int parentRes = H3Index.H3_get_resolution(h3); + if (childRes <= parentRes || childRes > MAX_H3_RES) { + throw new IllegalArgumentException("Invalid child resolution [" + childRes + "]"); + } + final int n = childRes - parentRes; + if (H3Index.H3_is_pentagon(h3)) { + return (1L + 5L * (_ipow(7, n) - 1L) / 6L); } else { return _ipow(7, n); } } + /** + * h3ToChildrenSize returns the exact number of children for an h3 address at a + * given child resolution. + * + * @param h3Address H3 address to find the number of children of + * @param childRes The child resolution you're interested in + * + * @return long Exact number of children (handles hexagons and pentagons + * correctly) + */ + public static long h3ToChildrenSize(String h3Address, int childRes) { + return h3ToChildrenSize(stringToH3(h3Address), childRes); + } + + /** + * h3ToChildrenSize returns the exact number of children + * + * @param h3 H3Index to find the number of children. + * + * @return int Exact number of children, 6 for pentagons and 7 for hexagons. + */ + public static int h3ToChildrenSize(long h3) { + if (H3Index.H3_get_resolution(h3) == MAX_H3_RES) { + throw new IllegalArgumentException("Invalid child resolution [" + MAX_H3_RES + "]"); + } + return isPentagon(h3) ? 6 : 7; + } + + /** + * h3ToChildrenSize returns the exact number of children + * + * @param h3Address H3 address to find the number of children. + * + * @return int Exact number of children, 6 for pentagons and 7 for hexagons. + */ + public static int h3ToChildrenSize(String h3Address) { + return h3ToChildrenSize(stringToH3(h3Address)); + } + + /** + * h3ToNotIntersectingChildrenSize returns the exact number of children intersecting + * the given parent but not part of the children set. + * + * @param h3 H3Index to find the number of children. + * + * @return int Exact number of children, 5 for pentagons and 6 for hexagons. + */ + public static int h3ToNotIntersectingChildrenSize(long h3) { + if (H3Index.H3_get_resolution(h3) == MAX_H3_RES) { + throw new IllegalArgumentException("Invalid child resolution [" + MAX_H3_RES + "]"); + } + return isPentagon(h3) ? 5 : 6; + } + + /** + * h3ToNotIntersectingChildrenSize returns the exact number of children intersecting + * the given parent but not part of the children set. + * + * @param h3Address H3 address to find the number of children. + * + * @return int Exact number of children, 5 for pentagons and 6 for hexagons. + */ + public static int h3ToNotIntersectingChildrenSize(String h3Address) { + return h3ToNotIntersectingChildrenSize(stringToH3(h3Address)); + } + + /** + * Find the h3 index containing the North Pole at the given resolution. + * + * @param res the provided resolution. + * + * @return the h3 index containing the North Pole. + */ + public static long northPolarH3(int res) { + checkResolution(res); + return NORTH[res]; + } + + /** + * Find the h3 address containing the North Pole at the given resolution. + * + * @param res the provided resolution.
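Since the static initializer shown earlier caches the pole cell for every resolution, these helpers are plain array reads. A small, hypothetical usage sketch of the new API follows (the longitudes below are arbitrary, since every longitude collapses to the same cell at the poles).

    import org.elasticsearch.h3.H3;

    public class PoleCellsDemo {
        public static void main(String[] args) {
            for (int res = 0; res <= H3.MAX_H3_RES; res++) {
                long north = H3.northPolarH3(res);
                long south = H3.southPolarH3(res);
                // Any point at +/-90 degrees latitude resolves to the same cell, whatever the longitude.
                assert north == H3.geoToH3(90, 123.45, res);
                assert south == H3.geoToH3(-90, -42.0, res);
                System.out.println(res + ": " + H3.h3ToString(north) + " / " + H3.h3ToString(south));
            }
        }
    }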
+ * + * @return the h3 address containing the North Pole. + */ + public static String northPolarH3Address(int res) { + return h3ToString(northPolarH3(res)); + } + + /** + * Find the h3 index containing the South Pole at the given resolution. + * + * @param res the provided resolution. + * + * @return the h3 index containing the South Pole. + */ + public static long southPolarH3(int res) { + checkResolution(res); + return SOUTH[res]; + } + + /** + * Find the h3 address containing the South Pole at the given resolution. + * + * @param res the provided resolution. + * + * @return the h3 address containing the South Pole. + */ + public static String southPolarH3Address(int res) { + return h3ToString(southPolarH3(res)); + } + /** * _ipow does integer exponentiation efficiently. Taken from StackOverflow. * @@ -316,8 +580,8 @@ private static int cellToChildrenSize(long h) { * * @return the exponentiated value */ - private static int _ipow(int base, int exp) { - int result = 1; + private static long _ipow(int base, int exp) { + long result = 1; while (exp != 0) { if ((exp & 1) != 0) { result *= base; diff --git a/libs/h3/src/main/java/org/elasticsearch/h3/HexRing.java b/libs/h3/src/main/java/org/elasticsearch/h3/HexRing.java index 3dfe2417be06..6ee6bd73fb66 100644 --- a/libs/h3/src/main/java/org/elasticsearch/h3/HexRing.java +++ b/libs/h3/src/main/java/org/elasticsearch/h3/HexRing.java @@ -309,7 +309,7 @@ final class HexRing { * \\2/ * */ - private static final CoordIJK.Direction[] DIRECTIONS = new CoordIJK.Direction[] { + static final CoordIJK.Direction[] DIRECTIONS = new CoordIJK.Direction[] { CoordIJK.Direction.J_AXES_DIGIT, CoordIJK.Direction.JK_AXES_DIGIT, CoordIJK.Direction.K_AXES_DIGIT, @@ -587,31 +587,6 @@ final class HexRing { CoordIJK.Direction.I_AXES_DIGIT, CoordIJK.Direction.J_AXES_DIGIT }; - /** - * Produce all neighboring cells. For Hexagons there will be 6 neighbors while - * for pentagon just 5. - * Output is placed in the provided array in no particular order. - * - * @param origin origin cell - */ - public static long[] hexRing(long origin) { - final long[] out = H3Index.H3_is_pentagon(origin) ? new long[5] : new long[6]; - int idx = 0; - long previous = -1; - for (int i = 0; i < 6; i++) { - long neighbor = h3NeighborInDirection(origin, DIRECTIONS[i].digit()); - if (neighbor != -1) { - // -1 is an expected case when trying to traverse off of pentagons. - if (previous != neighbor) { - out[idx++] = neighbor; - previous = neighbor; - } - } - } - assert idx == out.length; - return out; - } - /** * Returns whether or not the provided H3Indexes are neighbors. * @param origin The origin H3 index. @@ -700,7 +675,7 @@ public static boolean areNeighbours(long origin, long destination) { * @param dir Direction to move in * @return H3Index of the specified neighbor or -1 if there is no more neighbor */ - private static long h3NeighborInDirection(long origin, int dir) { + static long h3NeighborInDirection(long origin, int dir) { long current = origin; int newRotations = 0; diff --git a/libs/h3/src/main/java/org/elasticsearch/h3/Iterator.java b/libs/h3/src/main/java/org/elasticsearch/h3/Iterator.java deleted file mode 100644 index d711c3e4224a..000000000000 --- a/libs/h3/src/main/java/org/elasticsearch/h3/Iterator.java +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. 
licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - * This project is based on a modification of https://github.com/uber/h3 which is licensed under the Apache 2.0 License. - * - * Copyright 2021 Uber Technologies, Inc. - */ -package org.elasticsearch.h3; - -/** - * Iterator structures and functions for the children of a cell. - */ -final class Iterator { - /** - * Invalid index used to indicate an error from latLngToCell and related - * functions or missing data in arrays of H3 indices. Analogous to NaN in - * floating point. - */ - public static final long H3_NULL = 0; - - /** - * The number of bits in a single H3 resolution digit. - */ - private static final int H3_PER_DIGIT_OFFSET = 3; - - /** - * IterCellsChildren: struct for iterating through the descendants of - * a given cell. - *
- * Constructors: - *
- * Initialize with either `iterInitParent` or `iterInitBaseCellNum`. - * `iterInitParent` sets up an iterator for all the children of a given - * parent cell at a given resolution. - *
- * `iterInitBaseCellNum` sets up an iterator for children cells, given - * a base cell number (0--121). - *
- * Iteration: - *
- * Step iterator with `iterStepChild`. - * During the lifetime of the `IterCellsChildren`, the current iterate - * is accessed via the `IterCellsChildren.h` member. - * When the iterator is exhausted or if there was an error in initialization, - * `IterCellsChildren.h` will be `H3_NULL` even after calling `iterStepChild`. - */ - static class IterCellsChildren { - long h; - int _parentRes; // parent resolution - int _skipDigit; // this digit skips `1` for pentagons - - IterCellsChildren(long h, int _parentRes, int _skipDigit) { - this.h = h; - this._parentRes = _parentRes; - this._skipDigit = _skipDigit; - } - } - - /** - * Create a fully nulled-out child iterator for when an iterator is exhausted. - * This helps minimize the chance that a user will depend on the iterator - * internal state after it's exhausted, like the child resolution, for - * example. - */ - private static IterCellsChildren nullIter() { - return new IterCellsChildren(H3_NULL, -1, -1); - } - - /** - ## Logic for iterating through the children of a cell - We'll describe the logic for .... - - normal (non pentagon iteration) - - pentagon iteration. define "pentagon digit" - ### Cell Index Component Diagrams - The lower 56 bits of an H3 Cell Index describe the following index components: - - the cell resolution (4 bits) - - the base cell number (7 bits) - - the child cell digit for each resolution from 1 to 15 (3*15 = 45 bits) - These are the bits we'll be focused on when iterating through child cells. - To help describe the iteration logic, we'll use diagrams displaying the - (decimal) values for each component like: - child digit for resolution 2 - / - | res | base cell # | 1 | 2 | 3 | 4 | 5 | 6 | ... | - |-----|-------------|---|---|---|---|---|---|-----| - | 9 | 17 | 5 | 3 | 0 | 6 | 2 | 1 | ... | - ### Iteration through children of a hexagon (but not a pentagon) - Iteration through the children of a *hexagon* (but not a pentagon) - simply involves iterating through all the children values (0--6) - for each child digit (up to the child's resolution). - For example, suppose a resolution 3 hexagon index has the following - components: - parent resolution - / - | res | base cell # | 1 | 2 | 3 | 4 | 5 | 6 | ... | - |-----|-------------|---|---|---|---|---|---|-----| - | 3 | 17 | 3 | 5 | 1 | 7 | 7 | 7 | ... | - The iteration through all children of resolution 6 would look like: - parent res child res - / / - | res | base cell # | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | ... | - |-----|-------------|---|---|---|---|---|---|---|---|-----| - | 6 | 17 | 3 | 5 | 1 | 0 | 0 | 0 | 7 | 7 | ... | - | 6 | 17 | 3 | 5 | 1 | 0 | 0 | 1 | 7 | 7 | ... | - | ... | | | | | | | | | | | - | 6 | 17 | 3 | 5 | 1 | 0 | 0 | 6 | 7 | 7 | ... | - | 6 | 17 | 3 | 5 | 1 | 0 | 1 | 0 | 7 | 7 | ... | - | 6 | 17 | 3 | 5 | 1 | 0 | 1 | 1 | 7 | 7 | ... | - | ... | | | | | | | | | | | - | 6 | 17 | 3 | 5 | 1 | 6 | 6 | 6 | 7 | 7 | ... | - ### Step sequence on a *pentagon* cell - Pentagon cells have a base cell number (e.g., 97) corresponding to a - resolution 0 pentagon, and have all zeros from digit 1 to the digit - corresponding to the cell's resolution. - (We'll drop the ellipses from now on, knowing that digits should contain - 7's beyond the cell resolution.) 
- parent res child res - / / - | res | base cell # | 1 | 2 | 3 | 4 | 5 | 6 | - |-----|-------------|---|---|---|---|---|---| - | 6 | 97 | 0 | 0 | 0 | 0 | 0 | 0 | - Iteration through children of a *pentagon* is almost the same - as *hexagon* iteration, except that we skip the *first* 1 value - that appears in the "skip digit". This corresponds to the fact - that a pentagon only has 6 children, which are denoted with - the numbers {0,2,3,4,5,6}. - The skip digit starts at the child resolution position. - When iterating through children more than one resolution below - the parent, we move the skip digit to the left - (up to the next coarser resolution) each time we skip the 1 value - in that digit. - Iteration would start like: - parent res child res - / / - | res | base cell # | 1 | 2 | 3 | 4 | 5 | 6 | - |-----|-------------|---|---|---|---|---|---| - | 6 | 97 | 0 | 0 | 0 | 0 | 0 | 0 | - \ - skip digit - Noticing we skip the 1 value and move the skip digit, - the next iterate would be: - | res | base cell # | 1 | 2 | 3 | 4 | 5 | 6 | - |-----|-------------|---|---|---|---|---|---| - | 6 | 97 | 0 | 0 | 0 | 0 | 0 | 2 | - \ - skip digit - Iteration continues normally until we get to: - | res | base cell # | 1 | 2 | 3 | 4 | 5 | 6 | - |-----|-------------|---|---|---|---|---|---| - | 6 | 97 | 0 | 0 | 0 | 0 | 0 | 6 | - \ - skip digit - which is followed by (skipping the 1): - | res | base cell # | 1 | 2 | 3 | 4 | 5 | 6 | - |-----|-------------|---|---|---|---|---|---| - | 6 | 97 | 0 | 0 | 0 | 0 | 2 | 0 | - \ - skip digit - For the next iterate, we won't skip the `1` in the previous digit - because it is no longer the skip digit: - | res | base cell # | 1 | 2 | 3 | 4 | 5 | 6 | - |-----|-------------|---|---|---|---|---|---| - | 6 | 97 | 0 | 0 | 0 | 0 | 2 | 1 | - \ - skip digit - Iteration continues normally until we're right before the next skip - digit: - | res | base cell # | 1 | 2 | 3 | 4 | 5 | 6 | - |-----|-------------|---|---|---|---|---|---| - | 6 | 97 | 0 | 0 | 0 | 0 | 6 | 6 | - \ - skip digit - Which is followed by - | res | base cell # | 1 | 2 | 3 | 4 | 5 | 6 | - |-----|-------------|---|---|---|---|---|---| - | 6 | 97 | 0 | 0 | 0 | 2 | 0 | 0 | - \ - skip digit - and so on. - */ - - /** - * Initialize a IterCellsChildren struct representing the sequence giving - * the children of cell `h` at resolution `childRes`. - *
- * At any point in the iteration, starting once - * the struct is initialized, IterCellsChildren.h gives the current child. - *
- * Also, IterCellsChildren.h == H3_NULL when all the children have been iterated - * through, or if the input to `iterInitParent` was invalid. - */ - public static IterCellsChildren iterInitParent(long h, int childRes) { - - int parentRes = H3Index.H3_get_resolution(h); - - if (childRes < parentRes || childRes > Constants.MAX_H3_RES || h == H3_NULL) { - return nullIter(); - } - - long newH = zeroIndexDigits(h, parentRes + 1, childRes); - newH = H3Index.H3_set_resolution(newH, childRes); - - int _skipDigit; - if (H3Index.H3_is_pentagon(newH)) { - // The skip digit skips `1` for pentagons. - // The "_skipDigit" moves to the left as we count up from the - // child resolution to the parent resolution. - _skipDigit = childRes; - } else { - // if not a pentagon, we can ignore "skip digit" logic - _skipDigit = -1; - } - - return new IterCellsChildren(newH, parentRes, _skipDigit); - } - - /** - * Step a IterCellsChildren to the next child cell. - * When the iteration is over, IterCellsChildren.h will be H3_NULL. - * Handles iterating through hexagon and pentagon cells. - */ - public static void iterStepChild(IterCellsChildren it) { - // once h == H3_NULL, the iterator returns an infinite sequence of H3_NULL - if (it.h == H3_NULL) return; - - int childRes = H3Index.H3_get_resolution(it.h); - - incrementResDigit(it, childRes); - - for (int i = childRes; i >= it._parentRes; i--) { - if (i == it._parentRes) { - // if we're modifying the parent resolution digit, then we're done - // *it = _null_iter(); - it.h = H3_NULL; - return; - } - - // PENTAGON_SKIPPED_DIGIT == 1 - if (i == it._skipDigit && getResDigit(it, i) == CoordIJK.Direction.PENTAGON_SKIPPED_DIGIT.digit()) { - // Then we are iterating through the children of a pentagon cell. - // All children of a pentagon have the property that the first - // nonzero digit between the parent and child resolutions is - // not 1. - // I.e., we never see a sequence like 00001. - // Thus, we skip the `1` in this digit. - incrementResDigit(it, i); - it._skipDigit -= 1; - return; - } - - // INVALID_DIGIT == 7 - if (getResDigit(it, i) == CoordIJK.Direction.INVALID_DIGIT.digit()) { - incrementResDigit(it, i); // zeros out it[i] and increments it[i-1] by 1 - } else { - break; - } - } - } - - // extract the `res` digit (0--7) of the current cell - private static int getResDigit(IterCellsChildren it, int res) { - return H3Index.H3_get_index_digit(it.h, res); - } - - /** - * Zero out index digits from start to end, inclusive. - * No-op if start > end. - */ - private static long zeroIndexDigits(long h, int start, int end) { - if (start > end) { - return h; - } - - long m = 0; - - m = ~m; - m <<= H3_PER_DIGIT_OFFSET * (end - start + 1); - m = ~m; - m <<= H3_PER_DIGIT_OFFSET * (Constants.MAX_H3_RES - end); - m = ~m; - - return h & m; - } - - // increment the digit (0--7) at location `res` - private static void incrementResDigit(IterCellsChildren it, int res) { - long val = 1; - val <<= H3_PER_DIGIT_OFFSET * (Constants.MAX_H3_RES - res); - it.h += val; - } -} diff --git a/libs/h3/src/main/java/org/elasticsearch/h3/LatLng.java b/libs/h3/src/main/java/org/elasticsearch/h3/LatLng.java index 638498da72a1..713a09df8863 100644 --- a/libs/h3/src/main/java/org/elasticsearch/h3/LatLng.java +++ b/libs/h3/src/main/java/org/elasticsearch/h3/LatLng.java @@ -22,9 +22,19 @@ */ package org.elasticsearch.h3; +import java.util.Objects; + /** pair of latitude/longitude */ public final class LatLng { + /** Minimum Angular resolution. 
*/ + private static final double MINIMUM_ANGULAR_RESOLUTION = Math.PI * 1.0e-12; // taken from lucene's spatial3d + + /** + * pi / 2.0 + */ + private static final double M_PI_2 = 1.5707963267948966; + // lat / lon in radians private final double lon; private final double lat; @@ -62,9 +72,126 @@ public double getLonDeg() { * @return The azimuth in radians. */ double geoAzimuthRads(double lat, double lon) { - return Math.atan2( - Math.cos(lat) * Math.sin(lon - this.lon), - Math.cos(this.lat) * Math.sin(lat) - Math.sin(this.lat) * Math.cos(lat) * Math.cos(lon - this.lon) + // algorithm from the original H3 library + final double cosLat = FastMath.cos(lat); + return FastMath.atan2( + cosLat * FastMath.sin(lon - this.lon), + FastMath.cos(this.lat) * FastMath.sin(lat) - FastMath.sin(this.lat) * cosLat * FastMath.cos(lon - this.lon) ); } + + /** + * Computes the point on the sphere with a specified azimuth and distance from + * this point. + * + * @param az The desired azimuth. + * @param distance The desired distance. + * @return The LatLng point. + */ + LatLng geoAzDistanceRads(double az, double distance) { + // algorithm from the original H3 library + az = Vec2d.posAngleRads(az); + final double sinDistance = FastMath.sin(distance); + final double cosDistance = FastMath.cos(distance); + final double sinP1Lat = FastMath.sin(getLatRad()); + final double cosP1Lat = FastMath.cos(getLatRad()); + final double sinlat = Math.max(-1.0, Math.min(1.0, sinP1Lat * cosDistance + cosP1Lat * sinDistance * FastMath.cos(az))); + final double lat = FastMath.asin(sinlat); + if (Math.abs(lat - M_PI_2) < Constants.EPSILON) { // north pole + return new LatLng(M_PI_2, 0.0); + } else if (Math.abs(lat + M_PI_2) < Constants.EPSILON) { // south pole + return new LatLng(-M_PI_2, 0.0); + } else { + final double cosLat = FastMath.cos(lat); + final double sinlng = Math.max(-1.0, Math.min(1.0, FastMath.sin(az) * sinDistance / cosLat)); + final double coslng = Math.max(-1.0, Math.min(1.0, (cosDistance - sinP1Lat * FastMath.sin(lat)) / cosP1Lat / cosLat)); + return new LatLng(lat, constrainLng(getLonRad() + FastMath.atan2(sinlng, coslng))); + } + } + + /** + * constrainLng makes sure longitudes are in the proper bounds + * + * @param lng The origin lng value + * @return The corrected lng value + */ + private static double constrainLng(double lng) { + while (lng > Math.PI) { + lng = lng - Constants.M_2PI; + } + while (lng < -Math.PI) { + lng = lng + Constants.M_2PI; + } + return lng; + } + + /** + * Determines the maximum latitude of the great circle defined by this LatLng to the provided LatLng. + * + * @param latLng The LatLng. + * @return The maximum latitude of the great circle in radians. + */ + public double greatCircleMaxLatitude(LatLng latLng) { + if (isNumericallyIdentical(latLng)) { + return latLng.lat; + } + return latLng.lat > this.lat ? greatCircleMaxLatitude(latLng, this) : greatCircleMaxLatitude(this, latLng); + } + + private static double greatCircleMaxLatitude(LatLng latLng1, LatLng latLng2) { + // we compute the max latitude using Clairaut's formula (https://streckenflug.at/download/formeln.pdf) + assert latLng1.lat >= latLng2.lat; + final double az = latLng1.geoAzimuthRads(latLng2.lat, latLng2.lon); + // the great circle contains the maximum latitude only if the azimuth is between -90 and 90 degrees. 
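// Clairaut's relation: along a great circle, sin(azimuth) * cos(latitude) is constant. At the
// turning point the track runs due east-west (azimuth of +/-90 degrees), so
// cos(latMax) = |sin(az) * cos(lat1)|, i.e. latMax = acos(|sin(az) * cos(lat1)|),
// which is the expression returned below when the azimuth falls in that range.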
+ if (Math.abs(az) < M_PI_2) { + return FastMath.acos(Math.abs(FastMath.sin(az) * FastMath.cos(latLng1.lat))); + } + return latLng1.lat; + } + + /** + * Determines the minimum latitude of the great circle defined by this LatLng to the provided LatLng. + * + * @param latLng The LatLng. + * @return The minimum latitude of the great circle in radians. + */ + public double greatCircleMinLatitude(LatLng latLng) { + if (isNumericallyIdentical(latLng)) { + return latLng.lat; + } + return latLng.lat < this.lat ? greatCircleMinLatitude(latLng, this) : greatCircleMinLatitude(this, latLng); + } + + private static double greatCircleMinLatitude(LatLng latLng1, LatLng latLng2) { + assert latLng1.lat <= latLng2.lat; + // we compute the min latitude using Clairaut's formula (https://streckenflug.at/download/formeln.pdf) + final double az = latLng1.geoAzimuthRads(latLng2.lat, latLng2.lon); + // the great circle contains the minimum latitude only if the azimuth is not between -90 and 90 degrees. + if (Math.abs(az) > M_PI_2) { + // note the sign + return -FastMath.acos(Math.abs(FastMath.sin(az) * FastMath.cos(latLng1.lat))); + } + return latLng1.lat; + } + + boolean isNumericallyIdentical(LatLng latLng) { + return Math.abs(this.lat - latLng.lat) < MINIMUM_ANGULAR_RESOLUTION && Math.abs(this.lon - latLng.lon) < MINIMUM_ANGULAR_RESOLUTION; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final LatLng latLng = (LatLng) o; + return Double.compare(latLng.lon, lon) == 0 && Double.compare(latLng.lat, lat) == 0; + } + + @Override + public int hashCode() { + return Objects.hash(lon, lat); + } } diff --git a/libs/h3/src/main/java/org/elasticsearch/h3/Vec2d.java b/libs/h3/src/main/java/org/elasticsearch/h3/Vec2d.java index 5aa59b534b96..12ce728a9996 100644 --- a/libs/h3/src/main/java/org/elasticsearch/h3/Vec2d.java +++ b/libs/h3/src/main/java/org/elasticsearch/h3/Vec2d.java @@ -32,6 +32,8 @@ final class Vec2d { /** sin(60') */ private static final double M_SIN60 = Constants.M_SQRT3_2; + private static final double VEC2D_RESOLUTION = 1e-7; + /** * icosahedron face centers in lat/lng radians */ @@ -85,19 +87,6 @@ final class Vec2d { { 2.361378999196363184, 0.266983896803167583, 4.455774101589558636 }, // face 19 }; - /** - * pi - */ - private static final double M_PI = 3.14159265358979323846; - /** - * pi / 2.0 - */ - private static final double M_PI_2 = 1.5707963267948966; - /** - * 2.0 * PI - */ - public static final double M_2PI = 6.28318530717958647692528676655900576839433; - private final double x; /// < x component private final double y; /// < y component @@ -107,7 +96,7 @@ final class Vec2d { } /** - * Determines the center point in spherical coordinates of a cell given by 2D + * Determines the center point in spherical coordinates of a cell given by this 2D * hex coordinates on a particular icosahedral face. * * @param face The icosahedral face upon which the 2D hex coordinate system is @@ -117,14 +106,30 @@ final class Vec2d { * grid relative to the specified resolution. */ public LatLng hex2dToGeo(int face, int res, boolean substrate) { + return hex2dToGeo(this.x, this.y, face, res, substrate); + } + + /** + * Determines the center point in spherical coordinates of a cell given by the provided 2D + * hex coordinates on a particular icosahedral face. + * + * @param x The x component of the 2D hex coordinates. + * @param y The y component of the 2D hex coordinates. 
+ * @param face The icosahedral face upon which the 2D hex coordinate system is + * centered. + * @param res The H3 resolution of the cell. + * @param substrate Indicates whether or not this grid is actually a substrate + * grid relative to the specified resolution. + */ + static LatLng hex2dToGeo(double x, double y, int face, int res, boolean substrate) { // calculate (r, theta) in hex2d - double r = v2dMag(); + double r = Math.sqrt(x * x + y * y); if (r < Constants.EPSILON) { return faceCenterGeo[face]; } - double theta = Math.atan2(y, x); + double theta = FastMath.atan2(y, x); // scale for current resolution length u for (int i = 0; i < res; i++) { @@ -142,7 +147,7 @@ public LatLng hex2dToGeo(int face, int res, boolean substrate) { r *= Constants.RES0_U_GNOMONIC; // perform inverse gnomonic scaling of r - r = Math.atan(r); + r = FastMath.atan(r); // adjust theta for Class III // if a substrate grid, then it's already been adjusted for Class III @@ -153,7 +158,7 @@ public LatLng hex2dToGeo(int face, int res, boolean substrate) { // find theta as an azimuth theta = posAngleRads(faceAxesAzRadsCII[face][0] - theta); // now find the point at (r,theta) from the face center - return geoAzDistanceRads(faceCenterGeo[face], theta, r); + return Vec3d.faceCenterPoint[face].geoAzDistanceRads(theta, r); } /** @@ -162,13 +167,13 @@ public LatLng hex2dToGeo(int face, int res, boolean substrate) { * */ static CoordIJK hex2dToCoordIJK(double x, double y) { - double a1, a2; - double x1, x2; - int m1, m2; - double r1, r2; + final double a1, a2; + final double x1, x2; + final int m1, m2; + final double r1, r2; // quantize into the ij system and then normalize - int k = 0; + final int k = 0; int i; int j; @@ -194,17 +199,17 @@ static CoordIJK hex2dToCoordIJK(double x, double y) { j = m2; } else { i = m1; - j = m2 + 1; + j = Math.incrementExact(m2); } } else { if (r2 < (1.0 - r1)) { j = m2; } else { - j = m2 + 1; + j = Math.incrementExact(m2); } if ((1.0 - r1) <= r2 && r2 < (2.0 * r1)) { - i = m1 + 1; + i = Math.incrementExact(m1); } else { i = m1; } @@ -214,21 +219,21 @@ static CoordIJK hex2dToCoordIJK(double x, double y) { if (r2 < (1.0 - r1)) { j = m2; } else { - j = m2 + 1; + j = Math.addExact(m2, 1); } if ((2.0 * r1 - 1.0) < r2 && r2 < (1.0 - r1)) { i = m1; } else { - i = m1 + 1; + i = Math.incrementExact(m1); } } else { if (r2 < (r1 / 2.0)) { - i = m1 + 1; + i = Math.incrementExact(m1); j = m2; } else { - i = m1 + 1; - j = m2 + 1; + i = Math.incrementExact(m1); + j = Math.incrementExact(m2); } } } @@ -238,25 +243,29 @@ static CoordIJK hex2dToCoordIJK(double x, double y) { if (x < 0.0) { if ((j % 2) == 0) // even { - int axisi = j / 2; - int diff = i - axisi; - i = i - 2 * diff; + final int axisi = j / 2; + final int diff = Math.subtractExact(i, axisi); + i = Math.subtractExact(i, Math.multiplyExact(2, diff)); } else { - int axisi = (j + 1) / 2; - int diff = i - axisi; - i = i - (2 * diff + 1); + final int axisi = Math.addExact(j, 1) / 2; + final int diff = Math.subtractExact(i, axisi); + i = Math.subtractExact(i, Math.addExact(Math.multiplyExact(2, diff), 1)); } } if (y < 0.0) { - i = i - (2 * j + 1) / 2; - j = -1 * j; + i = Math.subtractExact(i, Math.addExact(Math.multiplyExact(2, j), 1) / 2); + j = Math.multiplyExact(-1, j); } final CoordIJK coordIJK = new CoordIJK(i, j, k); coordIJK.ijkNormalize(); return coordIJK; } + public boolean numericallyIdentical(Vec2d vec2d) { + return Math.abs(vec2d.x - x) < VEC2D_RESOLUTION && Math.abs(vec2d.y - y) < VEC2D_RESOLUTION; + } + @Override public boolean 
equals(Object o) { if (this == o) return true; @@ -280,25 +289,14 @@ public int hashCode() { * @param p3 The second endpoint of the second line. */ public static Vec2d v2dIntersect(Vec2d p0, Vec2d p1, Vec2d p2, Vec2d p3) { - double[] s1 = new double[2], s2 = new double[2]; - s1[0] = p1.x - p0.x; - s1[1] = p1.y - p0.y; - s2[0] = p3.x - p2.x; - s2[1] = p3.y - p2.y; - - float t; - t = (float) ((s2[0] * (p0.y - p2.y) - s2[1] * (p0.x - p2.x)) / (-s2[0] * s1[1] + s1[0] * s2[1])); + final double s1x = p1.x - p0.x; + final double s1y = p1.y - p0.y; + final double s2x = p3.x - p2.x; + final double s2y = p3.y - p2.y; - return new Vec2d(p0.x + (t * s1[0]), p0.y + (t * s1[1])); - } + final double t = ((s2x * (p0.y - p2.y) - s2y * (p0.x - p2.x)) / (-s2x * s1y + s1x * s2y)); - /** - * Calculates the magnitude of a 2D cartesian vector. - * - * @return The magnitude of the vector. - */ - private double v2dMag() { - return Math.sqrt(x * x + y * y); + return new Vec2d(p0.x + (t * s1x), p0.y + (t * s1y)); } /** @@ -309,106 +307,11 @@ private double v2dMag() { */ static double posAngleRads(double rads) { if (rads < 0.0) { - return rads + M_2PI; - } else if (rads >= M_2PI) { - return rads - M_2PI; + return rads + Constants.M_2PI; + } else if (rads >= Constants.M_2PI) { + return rads - Constants.M_2PI; } else { return rads; } } - - /** - * Computes the point on the sphere a specified azimuth and distance from - * another point. - * - * @param p1 The first spherical coordinates. - * @param az The desired azimuth from p1. - * @param distance The desired distance from p1, must be non-negative. - * p1. - */ - private static LatLng geoAzDistanceRads(LatLng p1, double az, double distance) { - if (distance < Constants.EPSILON) { - return p1; - } - - double sinlat, sinlng, coslng; - - az = posAngleRads(az); - - double lat, lon; - - // check for due north/south azimuth - if (az < Constants.EPSILON || Math.abs(az - M_PI) < Constants.EPSILON) { - if (az < Constants.EPSILON) {// due north - lat = p1.getLatRad() + distance; - } else { // due south - lat = p1.getLatRad() - distance; - } - if (Math.abs(lat - M_PI_2) < Constants.EPSILON) { // north pole - lat = M_PI_2; - lon = 0.0; - } else if (Math.abs(lat + M_PI_2) < Constants.EPSILON) { // south pole - lat = -M_PI_2; - lon = 0.0; - } else { - lon = constrainLng(p1.getLonRad()); - } - } else { // not due north or south - final double sinDistance = Math.sin(distance); - final double cosDistance = Math.cos(distance); - final double sinP1Lat = Math.sin(p1.getLatRad()); - final double cosP1Lat = Math.cos(p1.getLatRad()); - sinlat = sinP1Lat * cosDistance + cosP1Lat * sinDistance * Math.cos(az); - if (sinlat > 1.0) { - sinlat = 1.0; - } - if (sinlat < -1.0) { - sinlat = -1.0; - } - lat = Math.asin(sinlat); - if (Math.abs(lat - M_PI_2) < Constants.EPSILON) // north pole - { - lat = M_PI_2; - lon = 0.0; - } else if (Math.abs(lat + M_PI_2) < Constants.EPSILON) // south pole - { - lat = -M_PI_2; - lon = 0.0; - } else { - final double cosLat = Math.cos(lat); - sinlng = Math.sin(az) * sinDistance / cosLat; - coslng = (cosDistance - sinP1Lat * Math.sin(lat)) / cosP1Lat / cosLat; - if (sinlng > 1.0) { - sinlng = 1.0; - } - if (sinlng < -1.0) { - sinlng = -1.0; - } - if (coslng > 1.0) { - coslng = 1.0; - } - if (coslng < -1.0) { - coslng = -1.0; - } - lon = constrainLng(p1.getLonRad() + Math.atan2(sinlng, coslng)); - } - } - return new LatLng(lat, lon); - } - - /** - * constrainLng makes sure longitudes are in the proper bounds - * - * @param lng The origin lng value - * @return The 
corrected lng value - */ - private static double constrainLng(double lng) { - while (lng > M_PI) { - lng = lng - (2 * M_PI); - } - while (lng < -M_PI) { - lng = lng + (2 * M_PI); - } - return lng; - } } diff --git a/libs/h3/src/main/java/org/elasticsearch/h3/Vec3d.java b/libs/h3/src/main/java/org/elasticsearch/h3/Vec3d.java index 814f0afa8d78..c5c4f8975597 100644 --- a/libs/h3/src/main/java/org/elasticsearch/h3/Vec3d.java +++ b/libs/h3/src/main/java/org/elasticsearch/h3/Vec3d.java @@ -81,10 +81,10 @@ private double pointSquareDist(double x, double y, double z) { * @return The H3 index. */ static long geoToH3(int res, double lat, double lon) { - final double cosLat = Math.cos(lat); - final double z = Math.sin(lat); - final double x = Math.cos(lon) * cosLat; - final double y = Math.sin(lon) * cosLat; + final double cosLat = FastMath.cos(lat); + final double z = FastMath.sin(lat); + final double x = FastMath.cos(lon) * cosLat; + final double y = FastMath.sin(lon) * cosLat; // determine the icosahedron face int face = 0; double sqd = Vec3d.faceCenterPoint[0].pointSquareDist(x, y, z); @@ -96,7 +96,7 @@ static long geoToH3(int res, double lat, double lon) { } } // cos(r) = 1 - 2 * sin^2(r/2) = 1 - 2 * (sqd / 4) = 1 - sqd/2 - double r = Math.acos(1 - sqd / 2); + double r = FastMath.acos(1 - sqd / 2); if (r < Constants.EPSILON) { return FaceIJK.faceIjkToH3(res, face, new CoordIJK(0, 0, 0)); @@ -113,7 +113,7 @@ static long geoToH3(int res, double lat, double lon) { } // perform gnomonic scaling of r - r = Math.tan(r); + r = FastMath.tan(r); // scale for current resolution length u r /= Constants.RES0_U_GNOMONIC; @@ -122,7 +122,7 @@ static long geoToH3(int res, double lat, double lon) { } // we now have (r, theta) in hex2d with theta ccw from x-axes // convert to face and centered IJK coordinates - return FaceIJK.faceIjkToH3(res, face, Vec2d.hex2dToCoordIJK(r * Math.cos(theta), r * Math.sin(theta))); + return FaceIJK.faceIjkToH3(res, face, Vec2d.hex2dToCoordIJK(r * FastMath.cos(theta), r * FastMath.sin(theta))); } /** @@ -164,7 +164,52 @@ private static double square(double x) { final double c1c2Z = c1X * c2Y - c1Y * c2X; final double sign = Math.signum(dotProduct(this.x, this.y, this.z, c1c2X, c1c2Y, c1c2Z)); - return Math.atan2(sign * magnitude(c1c2X, c1c2Y, c1c2Z), dotProduct(c1X, c1Y, c1Z, c2X, c2Y, c2Z)); + return FastMath.atan2(sign * magnitude(c1c2X, c1c2Y, c1c2Z), dotProduct(c1X, c1Y, c1Z, c2X, c2Y, c2Z)); + } + + /** + * Computes the point on the sphere with a specified azimuth and distance from + * this point. + * + * @param az The desired azimuth. + * @param distance The desired distance. + * @return The LatLng point. 
+ */ + LatLng geoAzDistanceRads(double az, double distance) { + az = Vec2d.posAngleRads(az); + // from https://www.movable-type.co.uk/scripts/latlong-vectors.html + // N = {0,0,1} – vector representing north pole + // d̂e = N×a – east vector at a + // dn = a×de – north vector at a + // d = dn·cosθ + de·sinθ – direction vector in dir’n of θ + // b = a·cosδ + d·sinδ + + // east direction vector @ n1 (Gade's k_e_E) + final double magnitude = magnitude(this.x, this.y, 0); + final double deX = -this.y / magnitude; + final double deY = this.x / magnitude; + + // north direction vector @ n1 (Gade's (k_n_E) + final double dnX = -this.z * deY; + final double dnY = this.z * deX; + final double dnZ = this.x * deY - this.y * deX; + + final double sinAz = FastMath.sin(az); + final double cosAz = FastMath.cos(az); + final double sinDistance = FastMath.sin(distance); + final double cosDistance = FastMath.cos(distance); + + // direction vector @ n1 (≡ C×n1; C = great circle) + final double dX = dnX * cosAz + deX * sinAz; + final double dY = dnY * cosAz + deY * sinAz; + final double dZ = dnZ * cosAz; + + // Gade's n_EB_E = component of n2 parallel to n1 + component of n2 perpendicular to n1 + final double n2X = this.x * cosDistance + dX * sinDistance; + final double n2Y = this.y * cosDistance + dY * sinDistance; + final double n2Z = this.z * cosDistance + dZ * sinDistance; + + return new LatLng(FastMath.asin(n2Z), FastMath.atan2(n2Y, n2X)); } /** diff --git a/libs/h3/src/test/java/org/elasticsearch/h3/AzimuthTests.java b/libs/h3/src/test/java/org/elasticsearch/h3/AzimuthTests.java deleted file mode 100644 index 1f342d0cc43d..000000000000 --- a/libs/h3/src/test/java/org/elasticsearch/h3/AzimuthTests.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to Elasticsearch B.V. under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch B.V. licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.h3; - -import org.apache.lucene.spatial3d.geom.GeoPoint; -import org.apache.lucene.spatial3d.geom.PlanetModel; -import org.apache.lucene.tests.geo.GeoTestUtil; -import org.elasticsearch.test.ESTestCase; - -public class AzimuthTests extends ESTestCase { - - public void testLatLonVec3d() { - final double lat = Math.toRadians(GeoTestUtil.nextLatitude()); - final double lon = Math.toRadians(GeoTestUtil.nextLongitude()); - final GeoPoint point = new GeoPoint(PlanetModel.SPHERE, lat, lon); - for (int i = 0; i < Vec3d.faceCenterPoint.length; i++) { - final double azVec3d = Vec3d.faceCenterPoint[i].geoAzimuthRads(point.x, point.y, point.z); - final double azVec2d = Vec2d.faceCenterGeo[i].geoAzimuthRads(point.getLatitude(), point.getLongitude()); - assertEquals(azVec2d, azVec3d, 1e-14); - } - } -} diff --git a/libs/h3/src/test/java/org/elasticsearch/h3/CellBoundaryTests.java b/libs/h3/src/test/java/org/elasticsearch/h3/CellBoundaryTests.java index 7101b0ced03b..903e4ed40ec1 100644 --- a/libs/h3/src/test/java/org/elasticsearch/h3/CellBoundaryTests.java +++ b/libs/h3/src/test/java/org/elasticsearch/h3/CellBoundaryTests.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.h3; +import org.apache.lucene.geo.GeoEncodingUtils; +import org.apache.lucene.tests.geo.GeoTestUtil; import org.elasticsearch.test.ESTestCase; import java.io.BufferedReader; @@ -30,6 +32,9 @@ import java.util.StringTokenizer; import java.util.zip.GZIPInputStream; +import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.equalTo; + public class CellBoundaryTests extends ESTestCase { public void testRes0() throws Exception { @@ -171,8 +176,46 @@ private void processOne(String h3Address, BufferedReader reader) throws IOExcept CellBoundary boundary = H3.h3ToGeoBoundary(h3Address); assert boundary.numPoints() == points.size(); for (int i = 0; i < boundary.numPoints(); i++) { - assertEquals(h3Address, points.get(i)[0], boundary.getLatLon(i).getLatDeg(), 1e-8); - assertEquals(h3Address, points.get(i)[1], boundary.getLatLon(i).getLonDeg(), 1e-8); + assertEquals(h3Address, points.get(i)[0], boundary.getLatLon(i).getLatDeg(), 5e-7); + assertEquals(h3Address, points.get(i)[1], boundary.getLatLon(i).getLonDeg(), 5e-7); + } + } + + public void testNumericEquivalentSharedBoundary() { + // we consider boundaries numerical equivalent if after encoded them using lucene, they resolve to the same number. 
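// Concretely, two vertices are treated as the same point when both coordinates encode to the
// same 32-bit integers, i.e. GeoEncodingUtils.encodeLatitude(lat1) == GeoEncodingUtils.encodeLatitude(lat2)
// and GeoEncodingUtils.encodeLongitude(lon1) == GeoEncodingUtils.encodeLongitude(lon2), which is
// the same quantization Lucene applies when indexing points.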
+ long h3 = H3.geoToH3(GeoTestUtil.nextLatitude(), GeoTestUtil.nextLongitude(), randomIntBetween(0, 15)); + CellBoundary boundary = H3.h3ToGeoBoundary(h3); + for (long r : H3.hexRing(h3)) { + int count = 0; + CellBoundary ringBoundary = H3.h3ToGeoBoundary(r); + for (int i = 0; i < boundary.numPoints(); i++) { + LatLng latLng1 = boundary.getLatLon(i % boundary.numPoints()); + LatLng latLng2 = boundary.getLatLon((i + 1) % boundary.numPoints()); + int lon1 = GeoEncodingUtils.encodeLongitude(latLng1.getLonDeg()); + int lat1 = GeoEncodingUtils.encodeLatitude(latLng1.getLatDeg()); + int lon2 = GeoEncodingUtils.encodeLongitude(latLng2.getLonDeg()); + int lat2 = GeoEncodingUtils.encodeLatitude(latLng2.getLatDeg()); + if (isSharedBoundary(lon1, lat1, lon2, lat2, ringBoundary)) { + count++; + } + } + assertThat("For cell " + H3.h3ToString(h3), count, either(equalTo(1)).or(equalTo(2))); + } + } + + private boolean isSharedBoundary(int clon1, int clat1, int clon2, int clat2, CellBoundary boundary) { + for (int i = 0; i < boundary.numPoints(); i++) { + LatLng latLng1 = boundary.getLatLon(i % boundary.numPoints()); + LatLng latLng2 = boundary.getLatLon((i + 1) % boundary.numPoints()); + int lon1 = GeoEncodingUtils.encodeLongitude(latLng1.getLonDeg()); + int lat1 = GeoEncodingUtils.encodeLatitude(latLng1.getLatDeg()); + int lon2 = GeoEncodingUtils.encodeLongitude(latLng2.getLonDeg()); + int lat2 = GeoEncodingUtils.encodeLatitude(latLng2.getLatDeg()); + // edges are in opposite directions. + if (clon1 == lon2 & clat1 == lat2 && clon2 == lon1 && clat2 == lat1) { + return true; + } } + return false; } } diff --git a/libs/h3/src/test/java/org/elasticsearch/h3/FastMathTests.java b/libs/h3/src/test/java/org/elasticsearch/h3/FastMathTests.java new file mode 100644 index 000000000000..76b15188a182 --- /dev/null +++ b/libs/h3/src/test/java/org/elasticsearch/h3/FastMathTests.java @@ -0,0 +1,128 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.h3; + +import org.elasticsearch.test.ESTestCase; + +import java.util.function.DoubleSupplier; +import java.util.function.DoubleUnaryOperator; + +public class FastMathTests extends ESTestCase { + + // accuracy for cos(x) + static double COS_DELTA = 1E-15; + // accuracy for sin(x) + static double SIN_DELTA = 1E-15; + // accuracy for asin(x) + static double ASIN_DELTA = 1E-14; + // accuracy for acos(x) + static double ACOS_DELTA = 1E-14; + // accuracy for tan(x) + static double TAN_DELTA = 1E-14; + // accuracy for atan(x) + static double ATAN_DELTA = 1E-14; + // accuracy for atan2(x) + static double ATAN2_DELTA = 1E-14; + + public void testSin() { + doTest(Math::sin, FastMath::sin, d -> SIN_DELTA, () -> randomDoubleBetween(-2 * Math.PI, 2 * Math.PI, true)); + } + + public void testCos() { + doTest(Math::cos, FastMath::cos, d -> COS_DELTA, () -> randomDoubleBetween(-2 * Math.PI, 2 * Math.PI, true)); + } + + public void testTan() { + doTest( + Math::tan, + FastMath::tan, + d -> Math.max(TAN_DELTA, Math.abs(Math.tan(d)) * TAN_DELTA), + () -> randomDoubleBetween(-2 * Math.PI, 2 * Math.PI, true) + ); + } + + public void testAsin() { + doTest(Math::asin, FastMath::asin, d -> ASIN_DELTA, () -> randomDoubleBetween(-2, 2, true)); + } + + public void testAcos() { + doTest(Math::acos, FastMath::acos, d -> ACOS_DELTA, () -> randomDoubleBetween(-2, 2, true)); + } + + public void testAtan() { + doTest( + Math::atan, + FastMath::atan, + d -> ATAN_DELTA, + () -> randomDoubleBetween(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, true) + ); + } + + private void doTest(DoubleUnaryOperator expected, DoubleUnaryOperator actual, DoubleUnaryOperator delta, DoubleSupplier supplier) { + assertEquals(expected.applyAsDouble(Double.NaN), actual.applyAsDouble(Double.NaN), delta.applyAsDouble(Double.NaN)); + assertEquals( + expected.applyAsDouble(Double.NEGATIVE_INFINITY), + actual.applyAsDouble(Double.NEGATIVE_INFINITY), + delta.applyAsDouble(Double.POSITIVE_INFINITY) + ); + assertEquals( + expected.applyAsDouble(Double.POSITIVE_INFINITY), + actual.applyAsDouble(Double.POSITIVE_INFINITY), + delta.applyAsDouble(Double.POSITIVE_INFINITY) + ); + assertEquals( + expected.applyAsDouble(Double.MAX_VALUE), + actual.applyAsDouble(Double.MAX_VALUE), + delta.applyAsDouble(Double.MAX_VALUE) + ); + assertEquals( + expected.applyAsDouble(Double.MIN_VALUE), + actual.applyAsDouble(Double.MIN_VALUE), + delta.applyAsDouble(Double.MIN_VALUE) + ); + assertEquals(expected.applyAsDouble(0), actual.applyAsDouble(0), delta.applyAsDouble(0)); + for (int i = 0; i < 10000; i++) { + double d = supplier.getAsDouble(); + assertEquals(expected.applyAsDouble(d), actual.applyAsDouble(d), delta.applyAsDouble(d)); + } + } + + public void testAtan2() { + assertEquals(Math.atan2(Double.NaN, Double.NaN), FastMath.atan2(Double.NaN, Double.NaN), ATAN2_DELTA); + assertEquals( + Math.atan2(Double.NEGATIVE_INFINITY, Double.NEGATIVE_INFINITY), + FastMath.atan2(Double.NEGATIVE_INFINITY, Double.NEGATIVE_INFINITY), + ATAN2_DELTA + ); + assertEquals( + Math.atan2(Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY), + FastMath.atan2(Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY), + ATAN2_DELTA + ); + assertEquals(Math.atan2(Double.MAX_VALUE, Double.MAX_VALUE), FastMath.atan2(Double.MAX_VALUE, Double.MAX_VALUE), ATAN2_DELTA); + assertEquals(Math.atan2(Double.MIN_VALUE, Double.MIN_VALUE), FastMath.atan2(Double.MIN_VALUE, Double.MIN_VALUE), ATAN2_DELTA); + assertEquals(Math.atan2(0, 0), FastMath.atan2(0, 0), ATAN2_DELTA); + for 
(int i = 0; i < 10000; i++) { + double x = randomDoubleBetween(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, true); + double y = randomDoubleBetween(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, true); + assertEquals(Math.atan2(x, y), FastMath.atan2(x, y), ATAN2_DELTA); + } + } +} diff --git a/libs/h3/src/test/java/org/elasticsearch/h3/GeoToH3Tests.java b/libs/h3/src/test/java/org/elasticsearch/h3/GeoToH3Tests.java index 61289c50dbb0..cb7d416a5a9d 100644 --- a/libs/h3/src/test/java/org/elasticsearch/h3/GeoToH3Tests.java +++ b/libs/h3/src/test/java/org/elasticsearch/h3/GeoToH3Tests.java @@ -55,4 +55,16 @@ private GeoPolygon getGeoPolygon(String h3Address) { } return GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points); } + + public void testNorthPoleCells() { + for (int res = 0; res <= H3.MAX_H3_RES; res++) { + assertEquals(H3.northPolarH3Address(res), H3.geoToH3Address(90, GeoTestUtil.nextLongitude(), res)); + } + } + + public void testSouthPoleCells() { + for (int res = 0; res <= H3.MAX_H3_RES; res++) { + assertEquals(H3.southPolarH3Address(res), H3.geoToH3Address(-90, GeoTestUtil.nextLongitude(), res)); + } + } } diff --git a/libs/h3/src/test/java/org/elasticsearch/h3/HexRingTests.java b/libs/h3/src/test/java/org/elasticsearch/h3/HexRingTests.java index 486a5be8d621..8fe5c6206fff 100644 --- a/libs/h3/src/test/java/org/elasticsearch/h3/HexRingTests.java +++ b/libs/h3/src/test/java/org/elasticsearch/h3/HexRingTests.java @@ -25,6 +25,15 @@ public class HexRingTests extends ESTestCase { + public void testInvalidHexRingPos() { + long h3 = H3.geoToH3(GeoTestUtil.nextLatitude(), GeoTestUtil.nextLongitude(), randomIntBetween(0, H3.MAX_H3_RES)); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> H3.hexRingPosToH3(h3, -1)); + assertEquals(ex.getMessage(), "invalid ring position"); + int pos = H3.isPentagon(h3) ? 5 : 6; + ex = expectThrows(IllegalArgumentException.class, () -> H3.hexRingPosToH3(h3, pos)); + assertEquals(ex.getMessage(), "invalid ring position"); + } + public void testHexRing() { for (int i = 0; i < 500; i++) { double lat = GeoTestUtil.nextLatitude(); diff --git a/libs/h3/src/test/java/org/elasticsearch/h3/LatLngTests.java b/libs/h3/src/test/java/org/elasticsearch/h3/LatLngTests.java new file mode 100644 index 000000000000..12767af2127b --- /dev/null +++ b/libs/h3/src/test/java/org/elasticsearch/h3/LatLngTests.java @@ -0,0 +1,101 @@ +/* + * Licensed to Elasticsearch B.V. under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch B.V. licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.h3; + +import org.apache.lucene.spatial3d.geom.GeoPoint; +import org.apache.lucene.spatial3d.geom.LatLonBounds; +import org.apache.lucene.spatial3d.geom.Plane; +import org.apache.lucene.spatial3d.geom.PlanetModel; +import org.apache.lucene.spatial3d.geom.SidedPlane; +import org.apache.lucene.tests.geo.GeoTestUtil; +import org.elasticsearch.test.ESTestCase; + +public class LatLngTests extends ESTestCase { + + public void testLatLonAzimuthRads() { + final GeoPoint point = safePoint(); + for (int i = 0; i < Vec3d.faceCenterPoint.length; i++) { + final double azVec3d = Vec3d.faceCenterPoint[i].geoAzimuthRads(point.x, point.y, point.z); + final double azVec2d = Vec2d.faceCenterGeo[i].geoAzimuthRads(point.getLatitude(), point.getLongitude()); + assertEquals("Face " + i, azVec2d, azVec3d, 1e-12); + + } + } + + public void testLatLonAzDistanceRads() { + final double az = randomDoubleBetween(-2 * Math.PI, 2 * Math.PI, true); + final double distance = randomDoubleBetween(-Math.PI, Math.PI / 2, true); + for (int i = 0; i < Vec3d.faceCenterPoint.length; i++) { + final LatLng latLng3d = Vec3d.faceCenterPoint[i].geoAzDistanceRads(az, distance); + final LatLng latLng2d = Vec2d.faceCenterGeo[i].geoAzDistanceRads(az, distance); + assertTrue("Face " + i, latLng2d.isNumericallyIdentical(latLng3d)); + } + } + + /** + * Face 19 gives -180 vs 180 for azimuth when Latitude=-90 and Longitude is between 98 and 102. + * So we just exclude lat=-90 from the test to avoid this odd edge case. + */ + private GeoPoint safePoint() { + GeoPoint point; + do { + final double lat = Math.toRadians(GeoTestUtil.nextLatitude()); + final double lon = Math.toRadians(GeoTestUtil.nextLongitude()); + point = new GeoPoint(PlanetModel.SPHERE, lat, lon); + } while (point.getLatitude() == -Math.PI / 2); + return point; + } + + public void testGreatCircleMaxMinLatitude() { + for (int i = 0; i < 10; i++) { + final GeoPoint point1 = safePoint(); + final GeoPoint point2 = safePoint(); + final LatLng latLng1 = new LatLng(point1.getLatitude(), point1.getLongitude()); + final LatLng latLng2 = new LatLng(point2.getLatitude(), point2.getLongitude()); + final LatLonBounds bounds = getBounds(point1, point2); + assertEquals(bounds.getMaxLatitude(), latLng1.greatCircleMaxLatitude(latLng2), 1e-7); + assertEquals(bounds.getMinLatitude(), latLng1.greatCircleMinLatitude(latLng2), 1e-7); + } + } + + private LatLonBounds getBounds(final GeoPoint point1, final GeoPoint point2) { + final LatLonBounds bounds = new LatLonBounds(); + bounds.addPoint(point1); + bounds.addPoint(point2); + if (point1.isNumericallyIdentical(point2) == false) { + final Plane plane = new Plane(point1, point2); + bounds.addPlane(PlanetModel.SPHERE, plane, new SidedPlane(point1, plane, point2), new SidedPlane(point2, point1, plane)); + } + return bounds; + } + + public void testEqualsAndHashCode() { + final LatLng latLng = new LatLng(0, 0); + { + LatLng otherLatLng = new LatLng(1, 1); + assertNotEquals(latLng, otherLatLng); + assertNotEquals(latLng.hashCode(), otherLatLng.hashCode()); + } + { + LatLng otherLatLng = new LatLng(0, 0); + assertEquals(latLng, otherLatLng); + assertEquals(latLng.hashCode(), otherLatLng.hashCode()); + } + } +} diff --git a/libs/h3/src/test/java/org/elasticsearch/h3/ParentChildNavigationTests.java b/libs/h3/src/test/java/org/elasticsearch/h3/ParentChildNavigationTests.java index 4e8ed3e4b646..11d5a01b2d4e 100644 --- a/libs/h3/src/test/java/org/elasticsearch/h3/ParentChildNavigationTests.java +++ 
b/libs/h3/src/test/java/org/elasticsearch/h3/ParentChildNavigationTests.java @@ -20,10 +20,70 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import org.apache.lucene.geo.Point; +import org.apache.lucene.spatial3d.geom.GeoPoint; +import org.apache.lucene.spatial3d.geom.GeoPolygon; +import org.apache.lucene.spatial3d.geom.GeoPolygonFactory; +import org.apache.lucene.spatial3d.geom.PlanetModel; +import org.apache.lucene.tests.geo.GeoTestUtil; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; + +import java.util.ArrayList; +import java.util.List; +import java.util.Set; public class ParentChildNavigationTests extends ESTestCase { + public void testChildrenSize() { + Point point = GeoTestUtil.nextPoint(); + int res = randomInt(H3.MAX_H3_RES - 1); + String h3Address = H3.geoToH3Address(point.getLat(), point.getLon(), res); + // check invalid resolutions + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> H3.h3ToChildrenSize(h3Address, res)); + assertThat(ex.getMessage(), Matchers.containsString("Invalid child resolution")); + ex = expectThrows(IllegalArgumentException.class, () -> H3.h3ToChildrenSize(h3Address, H3.MAX_H3_RES + 1)); + assertThat(ex.getMessage(), Matchers.containsString("Invalid child resolution")); + ex = expectThrows( + IllegalArgumentException.class, + () -> H3.h3ToChildrenSize(H3.geoToH3(point.getLat(), point.getLon(), H3.MAX_H3_RES)) + ); + assertThat(ex.getMessage(), Matchers.containsString("Invalid child resolution")); + // check methods gives same answer + assertEquals(H3.h3ToChildrenSize(h3Address), H3.h3ToChildrenSize(h3Address, res + 1)); + // check against brute force counting + int childrenRes = Math.min(H3.MAX_H3_RES, res + randomIntBetween(2, 7)); + long numChildren = H3.h3ToChildrenSize(h3Address, childrenRes); + assertEquals(numChildren(h3Address, childrenRes), numChildren); + } + + private long numChildren(String h3Address, int finalRes) { + if (H3.getResolution(h3Address) == finalRes) { + return 1; + } + long result = 0; + for (int i = 0; i < H3.h3ToChildrenSize(h3Address); i++) { + result += numChildren(H3.childPosToH3(h3Address, i), finalRes); + } + return result; + } + + public void testNoChildrenIntersectingSize() { + Point point = GeoTestUtil.nextPoint(); + int res = randomInt(H3.MAX_H3_RES - 1); + String h3Address = H3.geoToH3Address(point.getLat(), point.getLon(), res); + // check invalid resolutions + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> H3.h3ToNotIntersectingChildrenSize(H3.geoToH3(point.getLat(), point.getLon(), H3.MAX_H3_RES)) + ); + assertThat(ex.getMessage(), Matchers.containsString("Invalid child resolution")); + // check against brute force counting + long numChildren = H3.h3ToNotIntersectingChildrenSize(h3Address); + assertEquals(H3.h3ToNoChildrenIntersecting(h3Address).length, numChildren); + } + public void testParentChild() { String[] h3Addresses = H3.getStringRes0Cells(); String h3Address = RandomPicks.randomFrom(random(), h3Addresses); @@ -31,6 +91,9 @@ public void testParentChild() { values[0] = h3Address; for (int i = 1; i < H3.MAX_H3_RES; i++) { h3Addresses = H3.h3ToChildren(h3Address); + // check all elements are unique + Set mySet = Sets.newHashSet(h3Addresses); + assertEquals(mySet.size(), h3Addresses.length); h3Address = RandomPicks.randomFrom(random(), h3Addresses); values[i] = h3Address; } @@ -44,11 +107,11 @@ public void testParentChild() { public void 
testHexRing() { String[] h3Addresses = H3.getStringRes0Cells(); - String h3Address = RandomPicks.randomFrom(random(), h3Addresses); for (int i = 1; i < H3.MAX_H3_RES; i++) { + String h3Address = RandomPicks.randomFrom(random(), h3Addresses); + assertEquals(i - 1, H3.getResolution(h3Address)); h3Addresses = H3.h3ToChildren(h3Address); assertHexRing(i, h3Address, h3Addresses); - h3Address = RandomPicks.randomFrom(random(), h3Addresses); } } @@ -65,4 +128,55 @@ private void assertHexRing(int res, String h3Address, String[] children) { assertEquals(children[i], ring[positions[i - 1]]); } } + + public void testNoChildrenIntersecting() { + String[] h3Addresses = H3.getStringRes0Cells(); + String h3Address = RandomPicks.randomFrom(random(), h3Addresses); + for (int i = 1; i <= H3.MAX_H3_RES; i++) { + h3Addresses = H3.h3ToChildren(h3Address); + assertIntersectingChildren(h3Address, h3Addresses); + h3Address = RandomPicks.randomFrom(random(), h3Addresses); + } + } + + private void assertIntersectingChildren(String h3Address, String[] children) { + int size = H3.h3ToNotIntersectingChildrenSize(h3Address); + for (int i = 0; i < size; i++) { + GeoPolygon p = getGeoPolygon(H3.noChildIntersectingPosToH3(h3Address, i)); + int intersections = 0; + for (String o : children) { + if (p.intersects(getGeoPolygon(o))) { + intersections++; + } + } + assertEquals(2, intersections); + } + } + + private GeoPolygon getGeoPolygon(String h3Address) { + CellBoundary cellBoundary = H3.h3ToGeoBoundary(h3Address); + List points = new ArrayList<>(cellBoundary.numPoints()); + for (int i = 0; i < cellBoundary.numPoints(); i++) { + LatLng latLng = cellBoundary.getLatLon(i); + points.add(new GeoPoint(PlanetModel.SPHERE, latLng.getLatRad(), latLng.getLonRad())); + } + return GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points); + } + + public void testHexRingPos() { + String[] h3Addresses = H3.getStringRes0Cells(); + for (int i = 0; i < H3.MAX_H3_RES; i++) { + String h3Address = RandomPicks.randomFrom(random(), h3Addresses); + assertHexRing3(h3Address); + h3Addresses = H3.h3ToChildren(h3Address); + } + } + + private void assertHexRing3(String h3Address) { + String[] ring = H3.hexRing(h3Address); + assertEquals(ring.length, H3.hexRingSize(h3Address)); + for (int i = 0; i < H3.hexRingSize(h3Address); i++) { + assertEquals(ring[i], H3.hexRingPosToH3(h3Address, i)); + } + } } diff --git a/libs/logging/build.gradle b/libs/logging/build.gradle index 3859c0b12ced..3004af029cb5 100644 --- a/libs/logging/build.gradle +++ b/libs/logging/build.gradle @@ -9,7 +9,6 @@ apply plugin: 'elasticsearch.publish' apply plugin: 'elasticsearch.build' - tasks.named("loggerUsageCheck").configure {enabled = false } dependencies { diff --git a/libs/plugin-analysis-api/build.gradle b/libs/plugin-analysis-api/build.gradle index 184295ebdcc5..bc9b305aa5a4 100644 --- a/libs/plugin-analysis-api/build.gradle +++ b/libs/plugin-analysis-api/build.gradle @@ -1,3 +1,5 @@ +import org.elasticsearch.gradle.internal.info.BuildParams + /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. 
Licensed under the Elastic License @@ -9,6 +11,7 @@ apply plugin: 'elasticsearch.publish' apply plugin: 'elasticsearch.build' +group = "org.elasticsearch.plugin" tasks.named("loggerUsageCheck").configure {enabled = false } @@ -26,3 +29,13 @@ tasks.named('forbiddenApisMain').configure { tasks.named("dependencyLicenses").configure { mapping from: /lucene-.*/, to: 'lucene' } + +tasks.named("thirdPartyAudit").configure { + if (BuildParams.runtimeJavaVersion == JavaVersion.VERSION_20) { + ignoreMissingClasses( + // This class was removed in Java 20 but is only referenced by a class that requires preview features anyhow + // See: https://github.com/apache/lucene/pull/12042 + 'java.lang.foreign.MemorySession', + ) + } +} diff --git a/libs/plugin-analysis-api/src/main/java/module-info.java b/libs/plugin-analysis-api/src/main/java/module-info.java index 899a5cd58a14..4652e6b77f7c 100644 --- a/libs/plugin-analysis-api/src/main/java/module-info.java +++ b/libs/plugin-analysis-api/src/main/java/module-info.java @@ -6,9 +6,9 @@ * Side Public License, v 1. */ -module org.elasticsearch.plugin.analysis.api { +module org.elasticsearch.plugin.analysis { requires org.apache.lucene.core; - requires org.elasticsearch.plugin.api; + requires org.elasticsearch.plugin; - exports org.elasticsearch.plugin.analysis.api; + exports org.elasticsearch.plugin.analysis; } diff --git a/libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/api/AnalysisMode.java b/libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/AnalysisMode.java similarity index 98% rename from libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/api/AnalysisMode.java rename to libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/AnalysisMode.java index f6a1e4ba70ca..5180b3168fcb 100644 --- a/libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/api/AnalysisMode.java +++ b/libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/AnalysisMode.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.plugin.analysis.api; +package org.elasticsearch.plugin.analysis; /** * Enum representing the mode in which token filters and analyzers are allowed to operate. diff --git a/libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/api/AnalyzerFactory.java b/libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/AnalyzerFactory.java similarity index 82% rename from libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/api/AnalyzerFactory.java rename to libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/AnalyzerFactory.java index 625f753a46d2..dfda8f292910 100644 --- a/libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/api/AnalyzerFactory.java +++ b/libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/AnalyzerFactory.java @@ -6,11 +6,11 @@ * Side Public License, v 1. */ -package org.elasticsearch.plugin.analysis.api; +package org.elasticsearch.plugin.analysis; import org.apache.lucene.analysis.Analyzer; -import org.elasticsearch.plugin.api.Extensible; -import org.elasticsearch.plugin.api.Nameable; +import org.elasticsearch.plugin.Extensible; +import org.elasticsearch.plugin.Nameable; /** * An analysis component used to create Analyzers. 
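For illustration, a named analyzer component written against the renamed packages might look like the sketch below. This is a minimal, hypothetical example: the class and names are invented, and it assumes AnalyzerFactory is an interface exposing a single create() method (the diff only shows its package move and javadoc), with name resolution handled by the @NamedComponent registration.

// Hypothetical plugin code, not part of this change.
package org.example.analysis;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.elasticsearch.plugin.NamedComponent;
import org.elasticsearch.plugin.analysis.AnalyzerFactory;

// Registered under the name "demo_analyzer"; note the imports use the new
// org.elasticsearch.plugin and org.elasticsearch.plugin.analysis packages
// (previously the *.api variants).
@NamedComponent("demo_analyzer")
public class DemoAnalyzerFactory implements AnalyzerFactory {

    @Override
    public Analyzer create() {
        // Any Lucene Analyzer works here; KeywordAnalyzer keeps the sketch minimal.
        return new KeywordAnalyzer();
    }
}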
diff --git a/libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/api/CharFilterFactory.java b/libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/CharFilterFactory.java similarity index 86% rename from libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/api/CharFilterFactory.java rename to libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/CharFilterFactory.java index 7095e32050cf..cb8cbeba01b2 100644 --- a/libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/api/CharFilterFactory.java +++ b/libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/CharFilterFactory.java @@ -6,10 +6,10 @@ * Side Public License, v 1. */ -package org.elasticsearch.plugin.analysis.api; +package org.elasticsearch.plugin.analysis; -import org.elasticsearch.plugin.api.Extensible; -import org.elasticsearch.plugin.api.Nameable; +import org.elasticsearch.plugin.Extensible; +import org.elasticsearch.plugin.Nameable; import java.io.Reader; diff --git a/libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/api/TokenFilterFactory.java b/libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/TokenFilterFactory.java similarity index 90% rename from libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/api/TokenFilterFactory.java rename to libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/TokenFilterFactory.java index 119918bd5b21..671390f99539 100644 --- a/libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/api/TokenFilterFactory.java +++ b/libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/TokenFilterFactory.java @@ -6,11 +6,11 @@ * Side Public License, v 1. */ -package org.elasticsearch.plugin.analysis.api; +package org.elasticsearch.plugin.analysis; import org.apache.lucene.analysis.TokenStream; -import org.elasticsearch.plugin.api.Extensible; -import org.elasticsearch.plugin.api.Nameable; +import org.elasticsearch.plugin.Extensible; +import org.elasticsearch.plugin.Nameable; /** * An analysis component used to create token filters. diff --git a/libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/api/TokenizerFactory.java b/libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/TokenizerFactory.java similarity index 81% rename from libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/api/TokenizerFactory.java rename to libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/TokenizerFactory.java index b6a0e69d5223..9222c296ebd9 100644 --- a/libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/api/TokenizerFactory.java +++ b/libs/plugin-analysis-api/src/main/java/org/elasticsearch/plugin/analysis/TokenizerFactory.java @@ -6,11 +6,11 @@ * Side Public License, v 1. */ -package org.elasticsearch.plugin.analysis.api; +package org.elasticsearch.plugin.analysis; import org.apache.lucene.analysis.Tokenizer; -import org.elasticsearch.plugin.api.Extensible; -import org.elasticsearch.plugin.api.Nameable; +import org.elasticsearch.plugin.Extensible; +import org.elasticsearch.plugin.Nameable; /** * An analysis component used to create tokenizers. 
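The settings annotations introduced further down in this change (org.elasticsearch.plugin.settings) are intended to be consumed through @Inject constructors on these factories. A minimal sketch of that wiring, using hypothetical names (DemoTokenFilterFactory, DemoSettings) and assuming TokenFilterFactory exposes a create(TokenStream) method and that a settings interface may be nested inside the component class:

// Hypothetical plugin code, not part of this change.
package org.example.analysis;

import org.apache.lucene.analysis.TokenStream;
import org.elasticsearch.plugin.Inject;
import org.elasticsearch.plugin.NamedComponent;
import org.elasticsearch.plugin.analysis.TokenFilterFactory;
import org.elasticsearch.plugin.settings.AnalysisSettings;
import org.elasticsearch.plugin.settings.BooleanSetting;
import org.elasticsearch.plugin.settings.IntSetting;

@NamedComponent("demo_filter")
public class DemoTokenFilterFactory implements TokenFilterFactory {

    // Each method on an @AnalysisSettings interface maps a setting path to a typed default.
    @AnalysisSettings
    public interface DemoSettings {
        @IntSetting(path = "max_length", defaultValue = 5)
        int maxLength();

        @BooleanSetting(path = "lower_case", defaultValue = false)
        boolean lowerCase();
    }

    private final DemoSettings settings;

    // The server is expected to supply an implementation of DemoSettings here.
    @Inject
    public DemoTokenFilterFactory(DemoSettings settings) {
        this.settings = settings;
    }

    @Override
    public TokenStream create(TokenStream input) {
        // A real filter would consult settings.maxLength() / settings.lowerCase();
        // returning the input unchanged keeps the sketch minimal.
        return input;
    }
}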
diff --git a/libs/plugin-api/build.gradle b/libs/plugin-api/build.gradle index 8ca0f53e7547..8dfe7ce9c793 100644 --- a/libs/plugin-api/build.gradle +++ b/libs/plugin-api/build.gradle @@ -9,6 +9,7 @@ apply plugin: 'elasticsearch.publish' apply plugin: 'elasticsearch.build' +group = "org.elasticsearch.plugin" tasks.named("loggerUsageCheck").configure {enabled = false } diff --git a/libs/plugin-api/src/main/java/module-info.java b/libs/plugin-api/src/main/java/module-info.java index 28a6c9319cef..03946a839f7b 100644 --- a/libs/plugin-api/src/main/java/module-info.java +++ b/libs/plugin-api/src/main/java/module-info.java @@ -6,6 +6,7 @@ * Side Public License, v 1. */ -module org.elasticsearch.plugin.api { - exports org.elasticsearch.plugin.api; +module org.elasticsearch.plugin { + exports org.elasticsearch.plugin; + exports org.elasticsearch.plugin.settings; } diff --git a/libs/plugin-api/src/main/java/org/elasticsearch/plugin/Extensible.java b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/Extensible.java new file mode 100644 index 000000000000..feb65d625e48 --- /dev/null +++ b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/Extensible.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.plugin; + +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +import static java.lang.annotation.ElementType.TYPE; + +/** + * Marker for things that can be loaded by component loader. + * + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(value = { TYPE }) +public @interface Extensible { +} diff --git a/libs/plugin-api/src/main/java/org/elasticsearch/plugin/Inject.java b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/Inject.java new file mode 100644 index 000000000000..c9b1f4b3c50a --- /dev/null +++ b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/Inject.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.plugin; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * An annotation used to mark constructor to inject plugin dependencies iee. settings. 
+ * A constructor parameter has to be an interface marked with appropriate annotation (i.e AnalysisSetting) + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.CONSTRUCTOR) +public @interface Inject { +} diff --git a/libs/plugin-api/src/main/java/org/elasticsearch/plugin/api/Nameable.java b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/Nameable.java similarity index 96% rename from libs/plugin-api/src/main/java/org/elasticsearch/plugin/api/Nameable.java rename to libs/plugin-api/src/main/java/org/elasticsearch/plugin/Nameable.java index 79b9c5298ffc..373aa94b7b8c 100644 --- a/libs/plugin-api/src/main/java/org/elasticsearch/plugin/api/Nameable.java +++ b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/Nameable.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.plugin.api; +package org.elasticsearch.plugin; /** * A named plugin component. Components with a name can be registered and fetched under a name given in diff --git a/libs/plugin-api/src/main/java/org/elasticsearch/plugin/NamedComponent.java b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/NamedComponent.java new file mode 100644 index 000000000000..44f8e6dd1317 --- /dev/null +++ b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/NamedComponent.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.plugin; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * An annotation used on plugin components which will be registered under a given name by Elasticsearch server. + */ +@Retention(RetentionPolicy.RUNTIME) +@Target({ ElementType.TYPE }) +public @interface NamedComponent { + /** + * The name used for registration and lookup + * @return a name + */ + String value(); +} diff --git a/libs/plugin-api/src/main/java/org/elasticsearch/plugin/api/Extensible.java b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/api/Extensible.java deleted file mode 100644 index 0f9bfa3ea8ef..000000000000 --- a/libs/plugin-api/src/main/java/org/elasticsearch/plugin/api/Extensible.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.plugin.api; - -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -import static java.lang.annotation.ElementType.TYPE; - -/** - * Marker for things that can be loaded by component loader. 
- * - */ -@Retention(RetentionPolicy.RUNTIME) -@Target(value = { TYPE }) -public @interface Extensible { -} diff --git a/libs/plugin-api/src/main/java/org/elasticsearch/plugin/api/NamedComponent.java b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/api/NamedComponent.java deleted file mode 100644 index ca62f8dd5aab..000000000000 --- a/libs/plugin-api/src/main/java/org/elasticsearch/plugin/api/NamedComponent.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.plugin.api; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * An annotation used on plugin components which will be registered under a given name by Elasticsearch server. - */ -@Retention(RetentionPolicy.RUNTIME) -@Target({ ElementType.TYPE }) -public @interface NamedComponent { - /** - * The name used for registration and lookup - * @return a name - */ - String value(); -} diff --git a/libs/plugin-api/src/main/java/org/elasticsearch/plugin/api/package-info.java b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/api/package-info.java deleted file mode 100644 index b8ecb3165f32..000000000000 --- a/libs/plugin-api/src/main/java/org/elasticsearch/plugin/api/package-info.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -/** - * An api used by plugin developers to implement custom Elasticsearch plugins. - * The package convention in plugin apis is as follows: - *

- * <ul>
- *     <li>The root package is org.elasticsearch.plugin</li>
- *     <li>Specialised API jars have their name following the root package.
- *     i.e. org.elasticsearch.plugin.analysis
- *     </li>
- *     <li>Interfaces and annotations used by plugin developers are in `api` package
- *     i.e org.elasticsearch.plugin.analysis.api or org.elasticsearch.plugin.api
- *     </li>
- *     <li>packages which are not meant to be used by plugin developers should not be subpackages of api
- *     i.e org.elasticsearch.plugin.analysis.internal
- *     </li>
- * </ul>
- */ -package org.elasticsearch.plugin.api; diff --git a/libs/plugin-api/src/main/java/org/elasticsearch/plugin/package-info.java b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/package-info.java new file mode 100644 index 000000000000..9e8ad40ef028 --- /dev/null +++ b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/package-info.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +/** + * An api used by plugin developers to implement custom Elasticsearch plugins. + * The package convention in plugin apis is as follows: + *
    + *
+ * <ul>
+ *     <li>The root package is org.elasticsearch.plugin</li>
+ *     <li>Specialised API jars have their name following the root package.
+ *     Interfaces and annotations used by plugin developers should be placed under it.
+ *     i.e. org.elasticsearch.plugin.analysis
+ *     </li>
+ *     <li>packages which are not meant to be used by plugin developers should be under internal package suffix
+ *     i.e org.elasticsearch.plugin.analysis.internal
+ *     </li>
+ * </ul>
+ */ +package org.elasticsearch.plugin; diff --git a/libs/plugin-api/src/main/java/org/elasticsearch/plugin/settings/AnalysisSettings.java b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/settings/AnalysisSettings.java new file mode 100644 index 000000000000..c0e6a59e0425 --- /dev/null +++ b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/settings/AnalysisSettings.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.plugin.settings; + +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +import static java.lang.annotation.ElementType.TYPE; + +/** + * An annotation used to mark analysis setting interface + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(value = { TYPE }) +public @interface AnalysisSettings { +} diff --git a/libs/plugin-api/src/main/java/org/elasticsearch/plugin/settings/BooleanSetting.java b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/settings/BooleanSetting.java new file mode 100644 index 000000000000..58749bea7d57 --- /dev/null +++ b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/settings/BooleanSetting.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.plugin.settings; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * An annotation used to mark a setting of type Boolean + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.METHOD) +public @interface BooleanSetting { + /** + * A name of a setting + */ + String path(); + + /** + * A default value of a boolean setting + */ + boolean defaultValue(); +} diff --git a/libs/plugin-api/src/main/java/org/elasticsearch/plugin/settings/IntSetting.java b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/settings/IntSetting.java new file mode 100644 index 000000000000..83f4ef40b423 --- /dev/null +++ b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/settings/IntSetting.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.plugin.settings; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * An annotation used to mark a setting of type integer + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.METHOD) +public @interface IntSetting { + /** + * A name of a setting + */ + String path(); + + /** + * A default value of an int setting + */ + int defaultValue(); +} diff --git a/libs/plugin-api/src/main/java/org/elasticsearch/plugin/settings/ListSetting.java b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/settings/ListSetting.java new file mode 100644 index 000000000000..5d561164bfb4 --- /dev/null +++ b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/settings/ListSetting.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.plugin.settings; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * An annotation used to mark a setting of type list. + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.METHOD) +public @interface ListSetting { + /** + * A name of a setting + */ + String path(); +} diff --git a/libs/plugin-api/src/main/java/org/elasticsearch/plugin/settings/LongSetting.java b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/settings/LongSetting.java new file mode 100644 index 000000000000..67af5baf9448 --- /dev/null +++ b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/settings/LongSetting.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.plugin.settings; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * An annotation used to mark a setting of type Long + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.METHOD) +public @interface LongSetting { + /** + * A name of a setting + */ + String path(); + + /** + * A default value of a long setting + */ + long defaultValue(); +} diff --git a/libs/plugin-api/src/main/java/org/elasticsearch/plugin/settings/StringSetting.java b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/settings/StringSetting.java new file mode 100644 index 000000000000..e16c3af32877 --- /dev/null +++ b/libs/plugin-api/src/main/java/org/elasticsearch/plugin/settings/StringSetting.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.plugin.settings; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * An annotation used to mark a setting of type String + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.METHOD) +public @interface StringSetting { + /** + * A name of a setting + */ + String path(); + + /** + * A default value of a String setting + */ + String defaultValue(); +} diff --git a/libs/plugin-scanner/build.gradle b/libs/plugin-scanner/build.gradle new file mode 100644 index 000000000000..a612533bc76c --- /dev/null +++ b/libs/plugin-scanner/build.gradle @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +apply plugin: 'elasticsearch.publish' + +tasks.named("jarHell").configure { enabled = false } + + +tasks.named("dependencyLicenses").configure { + mapping from: /asm-.*/, to: 'asm' +} + +dependencies { + api project(':libs:elasticsearch-core') + api project(':libs:elasticsearch-plugin-api') + api project(":libs:elasticsearch-x-content") + + api 'org.ow2.asm:asm:9.4' + api 'org.ow2.asm:asm-tree:9.4' + + testImplementation "junit:junit:${versions.junit}" + testImplementation(project(":test:framework")) { + exclude group: 'org.elasticsearch', module: 'elasticsearch-plugin-scanner' + } +} +tasks.named('forbiddenApisMain').configure { + // TODO: Need to decide how we want to handle for forbidden signatures with the changes to core + replaceSignatureFiles 'jdk-signatures' +} + diff --git a/libs/plugin-scanner/licenses/asm-LICENSE.txt b/libs/plugin-scanner/licenses/asm-LICENSE.txt new file mode 100644 index 000000000000..afb064f2f266 --- /dev/null +++ b/libs/plugin-scanner/licenses/asm-LICENSE.txt @@ -0,0 +1,26 @@ +Copyright (c) 2012 France Télécom +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. diff --git a/libs/plugin-scanner/licenses/asm-NOTICE.txt b/libs/plugin-scanner/licenses/asm-NOTICE.txt new file mode 100644 index 000000000000..8d1c8b69c3fc --- /dev/null +++ b/libs/plugin-scanner/licenses/asm-NOTICE.txt @@ -0,0 +1 @@ + diff --git a/libs/plugin-scanner/src/main/java/module-info.java b/libs/plugin-scanner/src/main/java/module-info.java new file mode 100644 index 000000000000..31f078289214 --- /dev/null +++ b/libs/plugin-scanner/src/main/java/module-info.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +module org.elasticsearch.plugin.scanner { + requires org.elasticsearch.base; + requires org.objectweb.asm; + requires org.elasticsearch.plugin; + requires org.elasticsearch.xcontent; + + exports org.elasticsearch.plugin.scanner; +} diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/scanner/AnnotatedHierarchyVisitor.java b/libs/plugin-scanner/src/main/java/org/elasticsearch/plugin/scanner/AnnotatedHierarchyVisitor.java similarity index 98% rename from build-tools/src/main/java/org/elasticsearch/gradle/plugin/scanner/AnnotatedHierarchyVisitor.java rename to libs/plugin-scanner/src/main/java/org/elasticsearch/plugin/scanner/AnnotatedHierarchyVisitor.java index 77cb6c97ddb6..a372ff10683a 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/scanner/AnnotatedHierarchyVisitor.java +++ b/libs/plugin-scanner/src/main/java/org/elasticsearch/plugin/scanner/AnnotatedHierarchyVisitor.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.gradle.plugin.scanner; +package org.elasticsearch.plugin.scanner; import org.objectweb.asm.AnnotationVisitor; import org.objectweb.asm.ClassVisitor; diff --git a/libs/plugin-scanner/src/main/java/org/elasticsearch/plugin/scanner/ClassReaders.java b/libs/plugin-scanner/src/main/java/org/elasticsearch/plugin/scanner/ClassReaders.java new file mode 100644 index 000000000000..c4490b0c27a3 --- /dev/null +++ b/libs/plugin-scanner/src/main/java/org/elasticsearch/plugin/scanner/ClassReaders.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.plugin.scanner; + +import org.elasticsearch.core.PathUtils; +import org.objectweb.asm.ClassReader; + +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.file.FileSystem; +import java.nio.file.FileSystems; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * A utility class containing methods to create streams of ASM's ClassReader + * + * @see ClassReader + */ +public class ClassReaders { + private static final String MODULE_INFO = "module-info.class"; + + /** + * This method must be used within a try-with-resources statement or similar + * control structure. + */ + public static List ofDirWithJars(Path dir) { + if (dir == null) { + return Collections.emptyList(); + } + try (var stream = Files.list(dir)) { + return ofPaths(stream); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + public static List ofPaths(Set classpathFiles) { + return ofPaths(classpathFiles.stream().map(ClassReaders::toPath)); + } + + private static Path toPath(URL url) { + try { + return PathUtils.get(url.toURI()); + } catch (URISyntaxException e) { + throw new AssertionError(e); + } + } + + /** + * This method must be used within a try-with-resources statement or similar + * control structure. + */ + public static List ofPaths(Stream list) { + return list.filter(Files::exists).flatMap(p -> { + if (p.toString().endsWith(".jar")) { + return classesInJar(p).stream(); + } else { + return classesInPath(p).stream(); + } + }).toList(); + } + + private static List classesInJar(Path jar) { + try (FileSystem jarFs = FileSystems.newFileSystem(jar)) { + Path root = jarFs.getPath("/"); + return classesInPath(root); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private static List classesInPath(Path root) { + try (var stream = Files.walk(root)) { + return stream.filter(p -> p.toString().endsWith(".class")) + .filter(p -> p.toString().endsWith(MODULE_INFO) == false) + .filter(p -> p.toString().startsWith("/META-INF") == false)// skip multi-release files + .map(p -> { + try (InputStream is = Files.newInputStream(p)) { + byte[] classBytes = is.readAllBytes(); + return new ClassReader(classBytes); + } catch (IOException ex) { + throw new UncheckedIOException(ex); + } + }) + .collect(Collectors.toList()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + public static List ofClassPath() throws IOException { + String classpath = System.getProperty("java.class.path"); + return ofClassPath(classpath); + } + + public static List ofClassPath(String classpath) { + if (classpath != null && classpath.equals("") == false) {// todo when do we set cp to "" ? 
+ var classpathSeparator = System.getProperty("path.separator"); + + String[] pathelements = classpath.split(classpathSeparator); + return ofPaths(Arrays.stream(pathelements).map(Paths::get)); + } + return Collections.emptyList(); + } +} diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/scanner/ClassScanner.java b/libs/plugin-scanner/src/main/java/org/elasticsearch/plugin/scanner/ClassScanner.java similarity index 87% rename from build-tools/src/main/java/org/elasticsearch/gradle/plugin/scanner/ClassScanner.java rename to libs/plugin-scanner/src/main/java/org/elasticsearch/plugin/scanner/ClassScanner.java index e63ee65e4a1c..4341ac729cba 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/plugin/scanner/ClassScanner.java +++ b/libs/plugin-scanner/src/main/java/org/elasticsearch/plugin/scanner/ClassScanner.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.gradle.plugin.scanner; +package org.elasticsearch.plugin.scanner; import org.objectweb.asm.AnnotationVisitor; import org.objectweb.asm.ClassReader; @@ -15,10 +15,10 @@ import java.util.Deque; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.function.BiFunction; -import java.util.stream.Stream; public class ClassScanner { private final Map foundClasses; @@ -32,10 +32,8 @@ public ClassScanner(String targetAnnotation, BiFunction classReaderStream) { - try (classReaderStream) { - classReaderStream.forEach(classReader -> classReader.accept(annotatedHierarchyVisitor, ClassReader.SKIP_CODE)); - } + public void visit(List classReaders) { + classReaders.forEach(classReader -> classReader.accept(annotatedHierarchyVisitor, ClassReader.SKIP_CODE)); addExtensibleDescendants(annotatedHierarchyVisitor.getClassHierarchy()); } diff --git a/libs/plugin-scanner/src/main/java/org/elasticsearch/plugin/scanner/NamedComponentScanner.java b/libs/plugin-scanner/src/main/java/org/elasticsearch/plugin/scanner/NamedComponentScanner.java new file mode 100644 index 000000000000..e306cf93bba4 --- /dev/null +++ b/libs/plugin-scanner/src/main/java/org/elasticsearch/plugin/scanner/NamedComponentScanner.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.plugin.scanner; + +import org.elasticsearch.plugin.Extensible; +import org.elasticsearch.plugin.NamedComponent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.objectweb.asm.AnnotationVisitor; +import org.objectweb.asm.ClassReader; +import org.objectweb.asm.Opcodes; +import org.objectweb.asm.Type; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class NamedComponentScanner { + + // main method to be used by gradle build plugin + public static void main(String[] args) throws IOException { + List classReaders = ClassReaders.ofClassPath(); + + NamedComponentScanner scanner = new NamedComponentScanner(); + Map> namedComponentsMap = scanner.scanForNamedClasses(classReaders); + Path outputFile = Path.of(args[0]); + scanner.writeToFile(namedComponentsMap, outputFile); + } + + // scope for testing + public void writeToFile(Map> namedComponentsMap, Path outputFile) throws IOException { + Files.createDirectories(outputFile.getParent()); + + try (OutputStream outputStream = Files.newOutputStream(outputFile)) { + try (XContentBuilder namedComponents = XContentFactory.jsonBuilder(outputStream)) { + namedComponents.startObject(); + for (Map.Entry> extensibleToComponents : namedComponentsMap.entrySet()) { + namedComponents.startObject(extensibleToComponents.getKey());// extensible class name + for (Map.Entry components : extensibleToComponents.getValue().entrySet()) { + namedComponents.field(components.getKey(), components.getValue());// component name : component class + } + namedComponents.endObject(); + } + namedComponents.endObject(); + } + } + + } + + // returns a Map - extensible interface -> map{ namedName -> className } + public Map> scanForNamedClasses(List classReaders) { + ClassScanner extensibleClassScanner = new ClassScanner(Type.getDescriptor(Extensible.class), (classname, map) -> { + map.put(classname, classname); + return null; + }); + extensibleClassScanner.visit(classReaders); + + ClassScanner namedComponentsScanner = new ClassScanner( + Type.getDescriptor(NamedComponent.class), + (classname, map) -> new AnnotationVisitor(Opcodes.ASM9) { + @Override + public void visit(String key, Object value) { + assert key.equals("value"); + assert value instanceof String; + map.put(value.toString(), classname); + } + } + ); + + namedComponentsScanner.visit(classReaders); + + Map> componentInfo = new HashMap<>(); + for (var e : namedComponentsScanner.getFoundClasses().entrySet()) { + String name = e.getKey(); + String classnameWithSlashes = e.getValue(); + String extensibleClassnameWithSlashes = extensibleClassScanner.getFoundClasses().get(classnameWithSlashes); + if (extensibleClassnameWithSlashes == null) { + throw new RuntimeException( + "Named component " + name + "(" + pathToClassName(classnameWithSlashes) + ") does not extend from an extensible class" + ); + } + var named = componentInfo.computeIfAbsent(pathToClassName(extensibleClassnameWithSlashes), k -> new HashMap<>()); + named.put(name, pathToClassName(classnameWithSlashes)); + } + return componentInfo; + } + + private String pathToClassName(String classWithSlashes) { + return classWithSlashes.replace('/', '.'); + } + +} diff --git a/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/AnnotatedHierarchyVisitorTests.java 
b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/AnnotatedHierarchyVisitorTests.java new file mode 100644 index 000000000000..8aaa36b7f9a0 --- /dev/null +++ b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/AnnotatedHierarchyVisitorTests.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.plugin.scanner; + +import org.elasticsearch.plugin.Extensible; +import org.elasticsearch.plugin.NamedComponent; +import org.elasticsearch.plugin.scanner.test_model.ExtensibleClass; +import org.elasticsearch.plugin.scanner.test_model.ExtensibleInterface; +import org.elasticsearch.plugin.scanner.test_model.ImplementingExtensible; +import org.elasticsearch.plugin.scanner.test_model.SubClass; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; +import org.objectweb.asm.ClassReader; +import org.objectweb.asm.Type; + +import java.io.IOException; +import java.io.InputStream; +import java.net.URISyntaxException; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; + +public class AnnotatedHierarchyVisitorTests extends ESTestCase { + Set foundClasses; + AnnotatedHierarchyVisitor visitor; + + @Before + public void init() { + foundClasses = new HashSet<>(); + visitor = new AnnotatedHierarchyVisitor(Type.getDescriptor(Extensible.class), (className) -> { + foundClasses.add(className); + return null; + }); + } + + public void testNoClassesAnnotated() throws IOException, URISyntaxException { + performScan(visitor, NamedComponent.class); + + assertTrue(foundClasses.isEmpty()); + } + + public void testSingleAnnotatedClass() throws IOException, URISyntaxException { + performScan(visitor, ExtensibleClass.class); + + assertThat(foundClasses, equalTo(Set.of(classNameToPath(ExtensibleClass.class)))); + } + + public void testSubClassofExtensible() throws IOException, URISyntaxException { + performScan(visitor, ExtensibleClass.class, SubClass.class); + + assertThat(foundClasses, equalTo(Set.of(classNameToPath(ExtensibleClass.class)))); + assertThat( + visitor.getClassHierarchy(), + equalTo(Map.of(classNameToPath(ExtensibleClass.class), Set.of(classNameToPath(SubClass.class)))) + ); + } + + public void testSubInterfaceOfExtensible() throws IOException, URISyntaxException { + performScan(visitor, ImplementingExtensible.class, ExtensibleInterface.class); + + assertThat(foundClasses, equalTo(Set.of(classNameToPath(ExtensibleInterface.class)))); + assertThat( + visitor.getClassHierarchy(), + equalTo(Map.of(classNameToPath(ExtensibleInterface.class), Set.of(classNameToPath(ImplementingExtensible.class)))) + ); + } + + private String classNameToPath(Class clazz) { + return clazz.getCanonicalName().replace(".", "/"); + } + + private void performScan(AnnotatedHierarchyVisitor classVisitor, Class... 
classes) throws IOException, URISyntaxException { + for (Class clazz : classes) { + String className = classNameToPath(clazz) + ".class"; + var stream = this.getClass().getClassLoader().getResourceAsStream(className); + try (InputStream fileInputStream = stream) { + ClassReader cr = new ClassReader(fileInputStream); + cr.accept(classVisitor, 0); + } + } + } + +} diff --git a/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/ClassReadersTests.java b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/ClassReadersTests.java new file mode 100644 index 000000000000..764065bc462b --- /dev/null +++ b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/ClassReadersTests.java @@ -0,0 +1,129 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.plugin.scanner; + +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.compiler.InMemoryJavaCompiler; +import org.elasticsearch.test.jar.JarUtils; +import org.hamcrest.Matchers; +import org.objectweb.asm.ClassReader; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +public class ClassReadersTests extends ESTestCase { + + public void testModuleInfoIsNotReturnedAsAClassFromJar() throws IOException { + final Path tmp = createTempDir(getTestName()); + final Path dirWithJar = tmp.resolve("jars-dir"); + Files.createDirectories(dirWithJar); + Path jar = dirWithJar.resolve("api.jar"); + JarUtils.createJarWithEntries(jar, Map.of("module-info.class", InMemoryJavaCompiler.compile("module-info", """ + module p {} + """))); + + List classReaders = ClassReaders.ofPaths(Stream.of(jar)); + org.hamcrest.MatcherAssert.assertThat(classReaders, Matchers.empty()); + + // aggressively delete the jar dir, so that any leaked filed handles fail this specific test on windows + IOUtils.rm(tmp); + } + + public void testTwoClassesInAStreamFromJar() throws IOException { + final Path tmp = createTempDir(getTestName()); + final Path dirWithJar = tmp.resolve("jars-dir"); + Files.createDirectories(dirWithJar); + Path jar = dirWithJar.resolve("api.jar"); + JarUtils.createJarWithEntries(jar, Map.of("p/A.class", InMemoryJavaCompiler.compile("p.A", """ + package p; + public class A {} + """), "p/B.class", InMemoryJavaCompiler.compile("p.B", """ + package p; + public class B {} + """))); + + List classReaders = ClassReaders.ofPaths(Stream.of(jar)); + List collect = classReaders.stream().map(cr -> cr.getClassName()).collect(Collectors.toList()); + org.hamcrest.MatcherAssert.assertThat(collect, Matchers.containsInAnyOrder("p/A", "p/B")); + + // aggressively delete the jar dir, so that any leaked filed handles fail this specific test on windows + IOUtils.rm(tmp); + } + + public void testStreamOfJarsAndIndividualClasses() throws IOException { + final Path tmp = createTempDir(getTestName()); + final Path dirWithJar = tmp.resolve("jars-dir"); + Files.createDirectories(dirWithJar); + + Path jar = dirWithJar.resolve("a_b.jar"); + JarUtils.createJarWithEntries(jar, Map.of("p/A.class", 
InMemoryJavaCompiler.compile("p.A", """ + package p; + public class A {} + """), "p/B.class", InMemoryJavaCompiler.compile("p.B", """ + package p; + public class B {} + """))); + + Path jar2 = dirWithJar.resolve("c_d.jar"); + JarUtils.createJarWithEntries(jar2, Map.of("p/C.class", InMemoryJavaCompiler.compile("p.C", """ + package p; + public class C {} + """), "p/D.class", InMemoryJavaCompiler.compile("p.D", """ + package p; + public class D {} + """))); + + InMemoryJavaCompiler.compile("p.E", """ + package p; + public class E {} + """); + Files.write(tmp.resolve("E.class"), InMemoryJavaCompiler.compile("p.E", """ + package p; + public class E {} + """)); + + List classReaders = ClassReaders.ofPaths(Stream.of(tmp, jar, jar2)); + List collect = classReaders.stream().map(cr -> cr.getClassName()).collect(Collectors.toList()); + org.hamcrest.MatcherAssert.assertThat(collect, Matchers.containsInAnyOrder("p/A", "p/B", "p/C", "p/D", "p/E")); + } + + public void testMultipleJarsInADir() throws IOException { + final Path tmp = createTempDir(getTestName()); + final Path dirWithJar = tmp.resolve("jars-dir"); + Files.createDirectories(dirWithJar); + + Path jar = dirWithJar.resolve("a_b.jar"); + JarUtils.createJarWithEntries(jar, Map.of("p/A.class", InMemoryJavaCompiler.compile("p.A", """ + package p; + public class A {} + """), "p/B.class", InMemoryJavaCompiler.compile("p.B", """ + package p; + public class B {} + """))); + + Path jar2 = dirWithJar.resolve("c_d.jar"); + JarUtils.createJarWithEntries(jar2, Map.of("p/C.class", InMemoryJavaCompiler.compile("p.C", """ + package p; + public class C {} + """), "p/D.class", InMemoryJavaCompiler.compile("p.D", """ + package p; + public class D {} + """))); + + List classReaders = ClassReaders.ofDirWithJars(dirWithJar); + List collect = classReaders.stream().map(cr -> cr.getClassName()).collect(Collectors.toList()); + org.hamcrest.MatcherAssert.assertThat(collect, Matchers.containsInAnyOrder("p/A", "p/B", "p/C", "p/D")); + } +} diff --git a/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/ClassScannerTests.java b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/ClassScannerTests.java new file mode 100644 index 000000000000..b8b35dfb568a --- /dev/null +++ b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/ClassScannerTests.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.plugin.scanner; + +import org.elasticsearch.plugin.Extensible; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; +import org.objectweb.asm.ClassReader; +import org.objectweb.asm.Type; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +public class ClassScannerTests extends ESTestCase { + static final System.Logger logger = System.getLogger(ClassScannerTests.class.getName()); + + public void testClassAndInterfaceHierarchy() throws IOException { + var reader = new ClassScanner(Type.getDescriptor(Extensible.class), (classname, map) -> { + map.put(classname, classname); + return null; + }); + List classReaders = ClassReaders.ofClassPath(); + logger.log(System.Logger.Level.INFO, "classReaderStream size " + classReaders.size()); + + reader.visit(classReaders); + Map extensibleClasses = reader.getFoundClasses(); + + org.hamcrest.MatcherAssert.assertThat( + extensibleClasses, + Matchers.allOf( + Matchers.hasEntry( + "org/elasticsearch/plugin/scanner/test_model/ExtensibleClass", + "org/elasticsearch/plugin/scanner/test_model/ExtensibleClass" + ), + Matchers.hasEntry( + "org/elasticsearch/plugin/scanner/test_model/ImplementingExtensible", + "org/elasticsearch/plugin/scanner/test_model/ExtensibleInterface" + ), + Matchers.hasEntry( + "org/elasticsearch/plugin/scanner/test_model/SubClass", + "org/elasticsearch/plugin/scanner/test_model/ExtensibleClass" + ) + ) + ); + } + +} diff --git a/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/NamedComponentScannerTests.java b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/NamedComponentScannerTests.java new file mode 100644 index 000000000000..cf81b81963c7 --- /dev/null +++ b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/NamedComponentScannerTests.java @@ -0,0 +1,227 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.plugin.scanner; + +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.plugin.scanner.test_model.ExtensibleClass; +import org.elasticsearch.plugin.scanner.test_model.ExtensibleInterface; +import org.elasticsearch.plugin.scanner.test_model.TestNamedComponent; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.compiler.InMemoryJavaCompiler; +import org.elasticsearch.test.jar.JarUtils; +import org.objectweb.asm.ClassReader; + +import java.io.IOException; +import java.io.InputStream; +import java.io.UncheckedIOException; +import java.net.URISyntaxException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.equalTo; + +public class NamedComponentScannerTests extends ESTestCase { + + private Path tmpDir() throws IOException { + return createTempDir(); + } + + NamedComponentScanner namedComponentScanner = new NamedComponentScanner(); + + public void testFindNamedComponentInSingleClass() throws URISyntaxException { + Map> namedComponents = namedComponentScanner.scanForNamedClasses( + classReaderStream(TestNamedComponent.class, ExtensibleInterface.class) + ); + + org.hamcrest.MatcherAssert.assertThat( + namedComponents, + equalTo( + Map.of( + ExtensibleInterface.class.getCanonicalName(), + Map.of("test_named_component", TestNamedComponent.class.getCanonicalName()) + ) + ) + ); + + } + + public void testNamedComponentsAreFoundWhenSingleJarProvided() throws IOException { + final Path tmp = tmpDir(); + final Path dirWithJar = tmp.resolve("jars-dir"); + Files.createDirectories(dirWithJar); + Path jar = dirWithJar.resolve("plugin.jar"); + JarUtils.createJarWithEntries(jar, Map.of("p/A.class", InMemoryJavaCompiler.compile("p.A", """ + package p; + import org.elasticsearch.plugin.*; + import org.elasticsearch.plugin.scanner.test_model.*; + @NamedComponent("a_component") + public class A extends ExtensibleClass {} + """), "p/B.class", InMemoryJavaCompiler.compile("p.B", """ + package p; + import org.elasticsearch.plugin.*; + import org.elasticsearch.plugin.scanner.test_model.*; + @NamedComponent("b_component") + public class B implements ExtensibleInterface{} + """))); + List classReaderStream = Stream.concat( + ClassReaders.ofDirWithJars(dirWithJar).stream(), + ClassReaders.ofClassPath().stream() + )// contains plugin-api + .toList(); + + Map> namedComponents = namedComponentScanner.scanForNamedClasses(classReaderStream); + + org.hamcrest.MatcherAssert.assertThat( + namedComponents, + equalTo( + Map.of( + ExtensibleClass.class.getCanonicalName(), + Map.of("a_component", "p.A"), + ExtensibleInterface.class.getCanonicalName(), + Map.of( + "b_component", + "p.B", + // noise from classpath + "test_named_component", + "org.elasticsearch.plugin.scanner.test_model.TestNamedComponent" + ) + ) + ) + ); + + // aggressively delete the jar dir, so that any leaked filed handles fail this specific test on windows + IOUtils.rm(tmp); + } + + public void testNamedComponentsCanExtednCommonSuperClass() throws IOException { + Map sources = Map.of( + "p.CustomExtensibleInterface", + """ + package p; + import org.elasticsearch.plugin.*; + import org.elasticsearch.plugin.scanner.test_model.*; + public interface CustomExtensibleInterface extends ExtensibleInterface {} + """, + // note that this class implements a 
+ "p.CustomExtensibleClass",
+ """
+ package p;
+ import org.elasticsearch.plugin.*;
+ import org.elasticsearch.plugin.scanner.test_model.*;
+ public class CustomExtensibleClass implements CustomExtensibleInterface {}
+ """,
+ "p.A",
+ """
+ package p;
+ import org.elasticsearch.plugin.*;
+ import org.elasticsearch.plugin.scanner.test_model.*;
+ @NamedComponent("a_component")
+ public class A extends CustomExtensibleClass {}
+ """,
+ "p.B",
+ """
+ package p;
+ import org.elasticsearch.plugin.*;
+ import org.elasticsearch.plugin.scanner.test_model.*;
+ @NamedComponent("b_component")
+ public class B implements CustomExtensibleInterface{}
+ """
+ );
+ var classToBytes = InMemoryJavaCompiler.compile(sources);
+
+ Map jarEntries = new HashMap<>();
+ jarEntries.put("p/CustomExtensibleInterface.class", classToBytes.get("p.CustomExtensibleInterface"));
+ jarEntries.put("p/CustomExtensibleClass.class", classToBytes.get("p.CustomExtensibleClass"));
+ jarEntries.put("p/A.class", classToBytes.get("p.A"));
+ jarEntries.put("p/B.class", classToBytes.get("p.B"));
+
+ final Path tmp = tmpDir();
+ final Path dirWithJar = tmp.resolve("jars-dir");
+ Files.createDirectories(dirWithJar);
+ Path jar = dirWithJar.resolve("plugin.jar");
+ JarUtils.createJarWithEntries(jar, jarEntries);
+
+ Stream classPath = ClassReaders.ofClassPath().stream();
+ List classReaders = Stream.concat(ClassReaders.ofDirWithJars(dirWithJar).stream(), classPath)// contains plugin-api
+ .toList();
+
+ Map> namedComponents = namedComponentScanner.scanForNamedClasses(classReaders);
+
+ org.hamcrest.MatcherAssert.assertThat(
+ namedComponents,
+ equalTo(
+ Map.of(
+ ExtensibleInterface.class.getCanonicalName(),
+ Map.of(
+ "a_component",
+ "p.A",
+ "b_component",
+ "p.B",
+ "test_named_component",
+ "org.elasticsearch.plugin.scanner.test_model.TestNamedComponent"// noise from classpath
+ )
+ )
+ )
+ );
+
+ // aggressively delete the jar dir, so that any leaked file handles fail this specific test on windows
+ IOUtils.rm(tmp);
+ }
+
+ public void testWriteToFile() throws IOException {
+ Map extensibleInterfaceComponents = new LinkedHashMap<>();
+ extensibleInterfaceComponents.put("a_component", "p.A");
+ extensibleInterfaceComponents.put("b_component", "p.B");
+ Map> mapToWrite = new LinkedHashMap<>();
+ mapToWrite.put(ExtensibleInterface.class.getCanonicalName(), extensibleInterfaceComponents);
+
+ Path path = tmpDir().resolve("file.json");
+ namedComponentScanner.writeToFile(mapToWrite, path);
+
+ String jsonMap = Files.readString(path);
+ assertThat(jsonMap, equalTo("""
+ {
+ "org.elasticsearch.plugin.scanner.test_model.ExtensibleInterface": {
+ "a_component": "p.A",
+ "b_component": "p.B"
+ }
+ }
+ """.replaceAll("[\n\r\s]", "")));
+ }
+
+ private List classReaderStream(Class...
classes) { + try { + return Arrays.stream(classes).map(clazz -> { + String className = classNameToPath(clazz) + ".class"; + var stream = this.getClass().getClassLoader().getResourceAsStream(className); + try (InputStream is = stream) { + byte[] classBytes = is.readAllBytes(); + ClassReader classReader = new ClassReader(classBytes); + return classReader; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }).collect(Collectors.toList()); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private String classNameToPath(Class clazz) { + return clazz.getCanonicalName().replace(".", "/"); + } + +} diff --git a/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/test_model/ExtensibleClass.java b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/test_model/ExtensibleClass.java new file mode 100644 index 000000000000..ad055a47629f --- /dev/null +++ b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/test_model/ExtensibleClass.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.plugin.scanner.test_model; + +import org.elasticsearch.plugin.Extensible; + +@Extensible +public class ExtensibleClass {} diff --git a/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/test_model/ExtensibleInterface.java b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/test_model/ExtensibleInterface.java new file mode 100644 index 000000000000..a99f0a1c8da4 --- /dev/null +++ b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/test_model/ExtensibleInterface.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.plugin.scanner.test_model; + +import org.elasticsearch.plugin.Extensible; + +@Extensible +public interface ExtensibleInterface {} diff --git a/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/test_model/ImplementingExtensible.java b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/test_model/ImplementingExtensible.java new file mode 100644 index 000000000000..2b82972624f5 --- /dev/null +++ b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/test_model/ImplementingExtensible.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.plugin.scanner.test_model; + +public class ImplementingExtensible implements ExtensibleInterface {} diff --git a/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/test_model/SubClass.java b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/test_model/SubClass.java new file mode 100644 index 000000000000..b11709023594 --- /dev/null +++ b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/test_model/SubClass.java @@ -0,0 +1,11 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.plugin.scanner.test_model; + +public class SubClass extends ExtensibleClass {} diff --git a/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/test_model/TestNamedComponent.java b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/test_model/TestNamedComponent.java new file mode 100644 index 000000000000..71db778ca4e1 --- /dev/null +++ b/libs/plugin-scanner/src/test/java/org/elasticsearch/plugin/scanner/test_model/TestNamedComponent.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.plugin.scanner.test_model; + +import org.elasticsearch.plugin.NamedComponent; + +@NamedComponent("test_named_component") +public class TestNamedComponent implements ExtensibleInterface { + +} diff --git a/libs/secure-sm/src/main/java/org/elasticsearch/secure_sm/SecureSM.java b/libs/secure-sm/src/main/java/org/elasticsearch/secure_sm/SecureSM.java index fa98bc1bacc6..68d2f45386c5 100644 --- a/libs/secure-sm/src/main/java/org/elasticsearch/secure_sm/SecureSM.java +++ b/libs/secure-sm/src/main/java/org/elasticsearch/secure_sm/SecureSM.java @@ -162,8 +162,12 @@ private static boolean isInnocuousThread(Thread t) { protected void checkThreadAccess(Thread t) { Objects.requireNonNull(t); - // first, check if we can modify threads at all. - checkPermission(MODIFY_THREAD_PERMISSION); + boolean targetThreadIsInnocuous = isInnocuousThread(t); + // we don't need to check if innocuous thread is modifying itself (like changes its name) + if (Thread.currentThread() != t || targetThreadIsInnocuous == false) { + // first, check if we can modify threads at all. + checkPermission(MODIFY_THREAD_PERMISSION); + } // check the threadgroup, if its our thread group or an ancestor, its fine. final ThreadGroup source = Thread.currentThread().getThreadGroup(); @@ -171,7 +175,7 @@ protected void checkThreadAccess(Thread t) { if (target == null) { return; // its a dead thread, do nothing. 
- } else if (source.parentOf(target) == false && isInnocuousThread(t) == false) {
+ } else if (source.parentOf(target) == false && targetThreadIsInnocuous == false) {
 checkPermission(MODIFY_ARBITRARY_THREAD_PERMISSION);
 }
 }
diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationKeys.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationKeys.java
index 7884a417c39c..83dbd919a60e 100644
--- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationKeys.java
+++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationKeys.java
@@ -71,6 +71,12 @@ public class SslConfigurationKeys {
 */
 public static final String TRUSTSTORE_ALGORITHM = "truststore.algorithm";
+ /**
+ * The fields from the X509 certificate used for restricted trust. Intentionally omitted from the list of settings returned by methods
+ * in this class. This is not a general purpose ssl configuration.
+ */
+ public static final String TRUST_RESTRICTIONS_X509_FIELDS = "trust_restrictions.x509_fields";
+
 // Key Management
 // -- Keystore
 /**
diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java
index 4acee2dfcec0..f0f1bbfd9ea7 100644
--- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java
+++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java
@@ -8,12 +8,15 @@
 package org.elasticsearch.common.ssl;
+import org.elasticsearch.core.Nullable;
+
 import java.nio.file.Path;
 import java.security.KeyStore;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.Objects;
+import java.util.Set;
 import java.util.function.Function;
 import java.util.stream.Collectors;
@@ -42,6 +45,7 @@
 import static org.elasticsearch.common.ssl.SslConfigurationKeys.TRUSTSTORE_PATH;
 import static org.elasticsearch.common.ssl.SslConfigurationKeys.TRUSTSTORE_SECURE_PASSWORD;
 import static org.elasticsearch.common.ssl.SslConfigurationKeys.TRUSTSTORE_TYPE;
+import static org.elasticsearch.common.ssl.SslConfigurationKeys.TRUST_RESTRICTIONS_X509_FIELDS;
 import static org.elasticsearch.common.ssl.SslConfigurationKeys.VERIFICATION_MODE;
 /**
@@ -115,6 +119,7 @@ public abstract class SslConfigurationLoader {
 static final List DEFAULT_CIPHERS = JDK12_CIPHERS;
 private static final char[] EMPTY_PASSWORD = new char[0];
+ public static final List GLOBAL_DEFAULT_RESTRICTED_TRUST_FIELDS = List.of(X509Field.SAN_OTHERNAME_COMMONNAME);
 private final String settingPrefix;
@@ -124,6 +129,7 @@ public abstract class SslConfigurationLoader {
 private SslClientAuthenticationMode defaultClientAuth;
 private List defaultCiphers;
 private List defaultProtocols;
+ private List defaultRestrictedTrustFields;
 private Function keyStoreFilter;
@@ -147,6 +153,7 @@ public SslConfigurationLoader(String settingPrefix) {
 this.defaultClientAuth = SslClientAuthenticationMode.OPTIONAL;
 this.defaultProtocols = DEFAULT_PROTOCOLS;
 this.defaultCiphers = DEFAULT_CIPHERS;
+ this.defaultRestrictedTrustFields = GLOBAL_DEFAULT_RESTRICTED_TRUST_FIELDS;
 }
 /**
@@ -204,6 +211,10 @@ public void setKeyStoreFilter(Function keyStoreFilter) {
 this.keyStoreFilter = keyStoreFilter;
 }
+ public void setDefaultRestrictedTrustFields(List x509Fields) {
+ this.defaultRestrictedTrustFields = x509Fields;
+ }
+
 /**
 * Clients of this class should implement this method to determine whether there
are any settings for a given prefix. * This is used to populate {@link SslConfiguration#explicitlyConfigured()}. @@ -255,9 +266,14 @@ public SslConfiguration load(Path basePath) { final List ciphers = resolveListSetting(CIPHERS, Function.identity(), defaultCiphers); final SslVerificationMode verificationMode = resolveSetting(VERIFICATION_MODE, SslVerificationMode::parse, defaultVerificationMode); final SslClientAuthenticationMode clientAuth = resolveSetting(CLIENT_AUTH, SslClientAuthenticationMode::parse, defaultClientAuth); + final List trustRestrictionsX509Fields = resolveListSetting( + TRUST_RESTRICTIONS_X509_FIELDS, + X509Field::parseForRestrictedTrust, + defaultRestrictedTrustFields + ); final SslKeyConfig keyConfig = buildKeyConfig(basePath); - final SslTrustConfig trustConfig = buildTrustConfig(basePath, verificationMode, keyConfig); + final SslTrustConfig trustConfig = buildTrustConfig(basePath, verificationMode, keyConfig, Set.copyOf(trustRestrictionsX509Fields)); if (protocols == null || protocols.isEmpty()) { throw new SslConfigException("no protocols configured in [" + settingPrefix + PROTOCOLS + "]"); @@ -278,7 +294,12 @@ public SslConfiguration load(Path basePath) { ); } - protected SslTrustConfig buildTrustConfig(Path basePath, SslVerificationMode verificationMode, SslKeyConfig keyConfig) { + protected SslTrustConfig buildTrustConfig( + Path basePath, + SslVerificationMode verificationMode, + SslKeyConfig keyConfig, + @Nullable Set restrictedTrustFields + ) { final List certificateAuthorities = resolveListSetting(CERTIFICATE_AUTHORITIES, Function.identity(), null); final String trustStorePath = resolveSetting(TRUSTSTORE_PATH, Function.identity(), null); @@ -330,7 +351,7 @@ public SslKeyConfig buildKeyConfig(Path basePath) { } if (certificatePath == null) { throw new SslConfigException( - "cannot specify [" + settingPrefix + KEYSTORE_PATH + "] without also setting [" + settingPrefix + CERTIFICATE + "]" + "cannot specify [" + settingPrefix + KEY + "] without also setting [" + settingPrefix + CERTIFICATE + "]" ); } final char[] password = resolvePasswordSetting(KEY_SECURE_PASSPHRASE, KEY_LEGACY_PASSPHRASE); diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/X509Field.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/X509Field.java new file mode 100644 index 000000000000..64de9adb8c22 --- /dev/null +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/X509Field.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.common.ssl; + +import java.util.EnumSet; +import java.util.stream.Collectors; + +/** + * An enumeration for referencing parts of an X509 certificate by a canonical string value. 
+ */ +public enum X509Field { + + SAN_OTHERNAME_COMMONNAME("subjectAltName.otherName.commonName", true), + SAN_DNS("subjectAltName.dnsName", true); + + private final String configValue; + private final boolean supportedForRestrictedTrust; + + X509Field(String configValue, boolean supportedForRestrictedTrust) { + this.configValue = configValue; + this.supportedForRestrictedTrust = supportedForRestrictedTrust; + } + + @Override + public String toString() { + return configValue; + } + + public static X509Field parseForRestrictedTrust(String s) { + return EnumSet.allOf(X509Field.class) + .stream() + .filter(v -> v.supportedForRestrictedTrust) + .filter(v -> v.configValue.equalsIgnoreCase(s)) + .findFirst() + .orElseThrow(() -> { + throw new SslConfigException( + s + + " is not a supported x509 field for trust restrictions. " + + "Recognised values are [" + + EnumSet.allOf(X509Field.class).stream().map(e -> e.configValue).collect(Collectors.toSet()) + + "]" + ); + }); + } +} diff --git a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java index bf5f3d0696cb..5980aca16a78 100644 --- a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java +++ b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/SslConfigurationLoaderTests.java @@ -22,6 +22,7 @@ import javax.net.ssl.TrustManagerFactory; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.instanceOf; @@ -225,4 +226,20 @@ public void testChaCha20InCiphersOnJdk12Plus() { assertThat(SslConfigurationLoader.DEFAULT_CIPHERS, hasItem("TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256")); assertThat(SslConfigurationLoader.DEFAULT_CIPHERS, hasItem("TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256")); } + + public void testErrorMessageForUnpairedKeyAndCertificateSettings() { + final boolean withKey = randomBoolean(); + if (withKey) { + settings = Settings.builder().put("test.ssl.key", "certs/cert1.key").build(); + } else { + settings = Settings.builder().put("test.ssl.certificate", "certs/cert1.cert").build(); + } + + final SslConfigException e = expectThrows(SslConfigException.class, () -> loader.load(certRoot)); + if (withKey) { + assertThat(e.getMessage(), containsString("cannot specify [test.ssl.key] without also setting [test.ssl.certificate]")); + } else { + assertThat(e.getMessage(), containsString("cannot specify [test.ssl.certificate] without also setting [test.ssl.key]")); + } + } } diff --git a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/X509FieldTests.java b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/X509FieldTests.java new file mode 100644 index 000000000000..a0de14aec887 --- /dev/null +++ b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/X509FieldTests.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.common.ssl; + +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.common.ssl.X509Field.SAN_DNS; +import static org.elasticsearch.common.ssl.X509Field.SAN_OTHERNAME_COMMONNAME; +import static org.hamcrest.Matchers.containsString; + +public class X509FieldTests extends ESTestCase { + + public void testParseForRestrictedTrust() { + assertEquals(SAN_OTHERNAME_COMMONNAME, X509Field.parseForRestrictedTrust("subjectAltName.otherName.commonName")); + assertEquals(SAN_DNS, X509Field.parseForRestrictedTrust("subjectAltName.dnsName")); + SslConfigException exception = expectThrows(SslConfigException.class, () -> X509Field.parseForRestrictedTrust("foo.bar")); + assertThat(exception.getMessage(), containsString("foo.bar")); + assertThat(exception.getMessage(), containsString("is not a supported x509 field for trust restrictions")); + assertThat(exception.getMessage(), containsString(SAN_OTHERNAME_COMMONNAME.toString())); + assertThat(exception.getMessage(), containsString(SAN_DNS.toString())); + } +} diff --git a/libs/x-content/build.gradle b/libs/x-content/build.gradle index bf110b9adb53..958e818e019d 100644 --- a/libs/x-content/build.gradle +++ b/libs/x-content/build.gradle @@ -74,7 +74,7 @@ def generateProviderManifest = tasks.register("generateProviderManifest") { doLast { manifestFile.parentFile.mkdirs() manifestFile.setText(configurations.providerImpl.files.stream() - .map(f -> f.name).collect(Collectors.joining('\n')), 'UTF-8') + .map(f -> f.name).sorted().collect(Collectors.joining('\n')), 'UTF-8') } } diff --git a/libs/x-content/impl/build.gradle b/libs/x-content/impl/build.gradle index a703bd0b44f9..670d6c8896a4 100644 --- a/libs/x-content/impl/build.gradle +++ b/libs/x-content/impl/build.gradle @@ -10,13 +10,15 @@ apply plugin: 'elasticsearch.java' archivesBaseName = "x-content-impl" +String jacksonVersion = "2.14.2" + dependencies { compileOnly project(':libs:elasticsearch-core') compileOnly project(':libs:elasticsearch-x-content') - implementation "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - implementation "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${versions.jackson}" - implementation "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}" - implementation "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" + implementation "com.fasterxml.jackson.core:jackson-core:${jacksonVersion}" + implementation "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${jacksonVersion}" + implementation "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${jacksonVersion}" + implementation "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${jacksonVersion}" implementation "org.yaml:snakeyaml:${versions.snakeyaml}" testImplementation(project(":test:framework")) { diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java index 32417bc86142..5208cbfb2952 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java @@ -435,6 +435,22 @@ public XContentBuilder value(byte value) throws IOException { return this; } + public XContentBuilder array(String name, byte[] values) throws IOException { + return field(name).values(values); + } + + private XContentBuilder values(byte[] values) throws IOException { + if (values == null) { + 
return nullValue(); + } + startArray(); + for (byte b : values) { + value(b); + } + endArray(); + return this; + } + //////////////////////////////////////////////////////////////////////////// // Double ////////////////////////////////// diff --git a/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/AbstractXContentFilteringTestCase.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/AbstractXContentFilteringTestCase.java index 0cc95f546937..90555c6fed45 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/AbstractXContentFilteringTestCase.java +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/AbstractXContentFilteringTestCase.java @@ -305,7 +305,7 @@ public void testDotsAndDoubleWildcardInIncludedFieldName() throws IOException { ); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/80160") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/92632") public void testDotsAndDoubleWildcardInExcludedFieldName() throws IOException { testFilter( builder -> builder.startObject().endObject(), @@ -314,7 +314,6 @@ public void testDotsAndDoubleWildcardInExcludedFieldName() throws IOException { singleton("**.baz"), true ); - // bug of double wildcard in excludes report in https://github.com/FasterXML/jackson-core/issues/700 testFilter( builder -> builder.startObject().startObject("foo").field("baz", "test").endObject().endObject(), builder -> builder.startObject().startObject("foo").field("bar", "test").field("baz", "test").endObject().endObject(), @@ -335,6 +334,61 @@ private void testFilter(Builder expected, Builder sample, Set includes, assertFilterResult(expected.apply(createBuilder()), filter(sample, includes, excludes, matchFieldNamesWithDots)); } + public void testArrayWithEmptyObjectInInclude() throws IOException { + testFilter( + builder -> builder.startObject().startArray("foo").startObject().field("bar", "baz").endObject().endArray().endObject(), + builder -> builder.startObject() + .startArray("foo") + .startObject() + .field("bar", "baz") + .endObject() + .startObject() + .endObject() + .endArray() + .endObject(), + singleton("foo.bar"), + emptySet(), + true + ); + } + + public void testArrayWithEmptyArrayInInclude() throws IOException { + testFilter( + builder -> builder.startObject().startArray("foo").startObject().field("bar", "baz").endObject().endArray().endObject(), + builder -> builder.startObject() + .startArray("foo") + .startObject() + .field("bar", "baz") + .endObject() + .startArray() + .endArray() + .endArray() + .endObject(), + singleton("foo.bar"), + emptySet(), + true + ); + } + + public void testArrayWithLastObjectSkipped() throws IOException { + testFilter( + builder -> builder.startObject().startArray("foo").startObject().field("bar", "baz").endObject().endArray().endObject(), + builder -> builder.startObject() + .startArray("foo") + .startObject() + .field("bar", "baz") + .endObject() + .startObject() + .field("skipped", "value") + .endObject() + .endArray() + .endObject(), + singleton("foo.bar"), + emptySet(), + true + ); + } + protected abstract void assertFilterResult(XContentBuilder expected, XContentBuilder actual); protected abstract XContentType getXContentType(); @@ -355,12 +409,6 @@ private XContentBuilder filter(Builder sample, Set includes, Set } FilterPath[] excludesFilter = FilterPath.compile(excludes); if (excludesFilter != null && Arrays.stream(excludesFilter).anyMatch(FilterPath::hasDoubleWildcard)) 
{ - /* - * If there are any double wildcard filters the parser based - * filtering produced weird invalid json. Just field names - * and no objects?! Weird. Anyway, we can't use it. - */ - assertFalse("can't filter on builder with dotted wildcards in exclude", matchFieldNamesWithDots); return filterOnBuilder(sample, includes, excludes); } return filterOnParser(sample, includes, excludes, matchFieldNamesWithDots); diff --git a/modules/aggregations/build.gradle b/modules/aggregations/build.gradle index b3cc698a5542..c4e4e5e0b401 100644 --- a/modules/aggregations/build.gradle +++ b/modules/aggregations/build.gradle @@ -1,4 +1,3 @@ -import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.info.BuildParams /* @@ -52,11 +51,7 @@ artifacts { restTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) } -testClusters.configureEach { - module ':modules:lang-painless' - requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") -} - dependencies { compileOnly(project(':modules:lang-painless:spi')) + clusterModules(project(':modules:lang-painless')) } diff --git a/modules/aggregations/src/main/java/module-info.java b/modules/aggregations/src/main/java/module-info.java index 3e378d108255..b9f2cb834736 100644 --- a/modules/aggregations/src/main/java/module-info.java +++ b/modules/aggregations/src/main/java/module-info.java @@ -17,8 +17,13 @@ exports org.elasticsearch.aggregations.bucket.adjacency; exports org.elasticsearch.aggregations.bucket.timeseries; exports org.elasticsearch.aggregations.pipeline; + exports org.elasticsearch.aggregations.metric; opens org.elasticsearch.aggregations to org.elasticsearch.painless.spi; // whitelist resource access provides org.elasticsearch.painless.spi.PainlessExtension with org.elasticsearch.aggregations.AggregationsPainlessExtension; + + provides org.elasticsearch.plugins.spi.NamedXContentProvider + with + org.elasticsearch.aggregations.metric.MatrixStatsNamedXContentProvider; } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/AggregationsPlugin.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/AggregationsPlugin.java index c46f4b444dcf..8bf4785b96b4 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/AggregationsPlugin.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/AggregationsPlugin.java @@ -14,6 +14,9 @@ import org.elasticsearch.aggregations.bucket.histogram.InternalAutoDateHistogram; import org.elasticsearch.aggregations.bucket.timeseries.InternalTimeSeries; import org.elasticsearch.aggregations.bucket.timeseries.TimeSeriesAggregationBuilder; +import org.elasticsearch.aggregations.metric.InternalMatrixStats; +import org.elasticsearch.aggregations.metric.MatrixStatsAggregationBuilder; +import org.elasticsearch.aggregations.metric.MatrixStatsParser; import org.elasticsearch.aggregations.pipeline.BucketSelectorPipelineAggregationBuilder; import org.elasticsearch.aggregations.pipeline.BucketSortPipelineAggregationBuilder; import org.elasticsearch.aggregations.pipeline.Derivative; @@ -48,6 +51,10 @@ public List getAggregations() { ).addResultReader(InternalAutoDateHistogram::new) .setAggregatorRegistrar(AutoDateHistogramAggregationBuilder::registerAggregators) ); + specs.add( + new AggregationSpec(MatrixStatsAggregationBuilder.NAME, MatrixStatsAggregationBuilder::new, new MatrixStatsParser()) + .addResultReader(InternalMatrixStats::new) + ); if (IndexSettings.isTimeSeriesModeEnabled()) { 
specs.add( new AggregationSpec( diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java index 8867ba732df1..db88685b5fbc 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.aggregations.bucket.adjacency; import org.apache.lucene.search.IndexSearcher; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.aggregations.bucket.adjacency.AdjacencyMatrixAggregator.KeyedFilter; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -261,7 +261,7 @@ public String getType() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_EMPTY; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.ZERO; } } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java index 8002d1c34f60..cac35ce644bf 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java @@ -8,7 +8,7 @@ package org.elasticsearch.aggregations.bucket.histogram; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -122,7 +122,7 @@ public AutoDateHistogramAggregationBuilder(String name) { public AutoDateHistogramAggregationBuilder(StreamInput in) throws IOException { super(in); numBuckets = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_7_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { minimumIntervalExpression = in.readOptionalString(); } } @@ -130,7 +130,7 @@ public AutoDateHistogramAggregationBuilder(StreamInput in) throws IOException { @Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeVInt(numBuckets); - if (out.getVersion().onOrAfter(Version.V_7_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { out.writeOptionalString(minimumIntervalExpression); } } @@ -267,8 +267,8 @@ public boolean equals(Object obj) { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_EMPTY; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.ZERO; } public static class RoundingInfo implements Writeable { diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java index 4860bedaee61..c91a6bed8a71 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ 
b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -8,7 +8,7 @@ package org.elasticsearch.aggregations.bucket.histogram; import org.apache.lucene.util.PriorityQueue; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; @@ -226,7 +226,7 @@ public InternalAutoDateHistogram(StreamInput in) throws IOException { format = in.readNamedWriteable(DocValueFormat.class); buckets = in.readList(stream -> new Bucket(stream, format)); this.targetBuckets = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_8_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { bucketInnerInterval = in.readVLong(); } else { bucketInnerInterval = 1; // Calculated on merge. @@ -239,7 +239,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeNamedWriteable(format); out.writeList(buckets); out.writeVInt(targetBuckets); - if (out.getVersion().onOrAfter(Version.V_8_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { out.writeVLong(bucketInnerInterval); } } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java index 088568fefc03..44fa62fef3fa 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeries.java @@ -217,6 +217,9 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurr } InternalTimeSeries reduced = new InternalTimeSeries(name, new ArrayList<>(initialCapacity), keyed, getMetadata()); + Integer size = reduceContext.builder() instanceof TimeSeriesAggregationBuilder + ? 
((TimeSeriesAggregationBuilder) reduceContext.builder()).getSize() + : null; // tests may use a fake builder List bucketsWithSameKey = new ArrayList<>(aggregations.size()); BytesRef prevTsid = null; while (pq.size() > 0) { @@ -240,12 +243,16 @@ protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurr InternalBucket reducedBucket; if (bucketsWithSameKey.size() == 1) { reducedBucket = bucketsWithSameKey.get(0); + reducedBucket.aggregations = InternalAggregations.reduce(List.of(reducedBucket.aggregations), reduceContext); } else { reducedBucket = reduceBucket(bucketsWithSameKey, reduceContext); } BytesRef tsid = reducedBucket.key; assert prevTsid == null || tsid.compareTo(prevTsid) > 0; reduced.buckets.add(reducedBucket); + if (size != null && reduced.buckets.size() >= size) { + break; + } prevTsid = tsid; } return reduced; diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregationBuilder.java index 5815b0e49967..405a38237375 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregationBuilder.java @@ -8,13 +8,14 @@ package org.elasticsearch.aggregations.bucket.timeseries; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.MultiBucketConsumerService; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.xcontent.InstantiatingObjectParser; import org.elasticsearch.xcontent.ParseField; @@ -30,9 +31,13 @@ public class TimeSeriesAggregationBuilder extends AbstractAggregationBuilder { public static final String NAME = "time_series"; public static final ParseField KEYED_FIELD = new ParseField("keyed"); + public static final ParseField SIZE_FIELD = new ParseField("size"); public static final InstantiatingObjectParser PARSER; private boolean keyed; + private int size; + + private static final int DEFAULT_SIZE = MultiBucketConsumerService.DEFAULT_MAX_BUCKETS; static { InstantiatingObjectParser.Builder parser = InstantiatingObjectParser.builder( @@ -41,17 +46,23 @@ public class TimeSeriesAggregationBuilder extends AbstractAggregationBuilder metadata) throws IOException { - return new TimeSeriesAggregator(name, factories, keyed, context, parent, cardinality, metadata); + return new TimeSeriesAggregator(name, factories, keyed, context, parent, cardinality, metadata, size); } } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java index 2d1e45183986..6930b0579a89 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java +++ 
b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java
@@ -30,6 +30,7 @@ public class TimeSeriesAggregator extends BucketsAggregator {
 protected final BytesKeyedBucketOrds bucketOrds;
 private final boolean keyed;
+ private final int size;
 public TimeSeriesAggregator(
 String name,
@@ -38,11 +39,13 @@ public TimeSeriesAggregator(
 AggregationContext context,
 Aggregator parent,
 CardinalityUpperBound bucketCardinality,
- Map metadata
+ Map metadata,
+ int size
 ) throws IOException {
 super(name, factories, context, parent, CardinalityUpperBound.MANY, metadata);
 this.keyed = keyed;
 bucketOrds = BytesKeyedBucketOrds.build(bigArrays(), bucketCardinality);
+ this.size = size;
 }
 @Override
@@ -66,6 +69,9 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I
 );
 bucket.bucketOrd = ordsEnum.ord();
 buckets.add(bucket);
+ if (buckets.size() >= size) {
+ break;
+ }
 }
 allBucketsPerOrd[ordIdx] = buckets.toArray(new InternalTimeSeries.InternalBucket[0]);
 }
@@ -92,8 +98,23 @@ protected void doClose() {
 protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, LeafBucketCollector sub) throws IOException {
 return new LeafBucketCollectorBase(sub, null) {
+ // Keeping track of these fields helps to reduce time spent attempting to add bucket + tsid combos that were already added.
+ long currentTsidOrd = -1;
+ long currentBucket = -1;
+ long currentBucketOrdinal;
+
 @Override
 public void collect(int doc, long bucket) throws IOException {
+ // Naively comparing bucket against currentBucket and tsid ord against currentTsidOrd can work really well.
+ // TimeSeriesIndexSearcher ensures that docs are emitted in tsid and timestamp order, so if the tsid ordinal
+ // changes from what is stored in currentTsidOrd then that ordinal will never occur again. The same applies to
+ // currentBucket if there is no parent aggregation or the immediate parent aggregation creates buckets
+ // based on the @timestamp field or dimension fields (fields that make up the tsid).
+ if (currentBucket == bucket && currentTsidOrd == aggCtx.getTsidOrd()) {
+ collectExistingBucket(sub, doc, currentBucketOrdinal);
+ return;
+ }
+
 long bucketOrdinal = bucketOrds.add(bucket, aggCtx.getTsid());
 if (bucketOrdinal < 0) { // already seen
 bucketOrdinal = -1 - bucketOrdinal;
@@ -101,6 +122,10 @@ public void collect(int doc, long bucket) throws IOException {
 } else {
 collectBucket(sub, doc, bucketOrdinal);
 }
+
+ currentBucketOrdinal = bucketOrdinal;
+ currentTsidOrd = aggCtx.getTsidOrd();
+ currentBucket = bucket;
 }
 };
 }
diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/ArrayValuesSource.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSource.java
similarity index 98%
rename from modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/ArrayValuesSource.java
rename to modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSource.java
index d853c06ea3e7..9a46d7120501 100644
--- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/ArrayValuesSource.java
+++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSource.java
@@ -5,7 +5,7 @@
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
*/ -package org.elasticsearch.search.aggregations.matrix; +package org.elasticsearch.aggregations.metric; import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.index.fielddata.NumericDoubleValues; diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/ArrayValuesSourceAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceAggregationBuilder.java similarity index 99% rename from modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/ArrayValuesSourceAggregationBuilder.java rename to modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceAggregationBuilder.java index 03094ad8690e..b78707bbb1f8 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/ArrayValuesSourceAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceAggregationBuilder.java @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -package org.elasticsearch.search.aggregations.matrix; +package org.elasticsearch.aggregations.metric; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/ArrayValuesSourceAggregatorFactory.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceAggregatorFactory.java similarity index 98% rename from modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/ArrayValuesSourceAggregatorFactory.java rename to modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceAggregatorFactory.java index 9249c31e156a..26e2110ecbb8 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/ArrayValuesSourceAggregatorFactory.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceAggregatorFactory.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.search.aggregations.matrix; +package org.elasticsearch.aggregations.metric; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/ArrayValuesSourceParser.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceParser.java similarity index 89% rename from modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/ArrayValuesSourceParser.java rename to modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceParser.java index 68daf3e8dbb6..eab1c2482028 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/ArrayValuesSourceParser.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ArrayValuesSourceParser.java @@ -6,14 +6,13 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.search.aggregations.matrix; +package org.elasticsearch.aggregations.metric; import org.elasticsearch.common.ParsingException; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AggregationBuilder.CommonFields; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; -import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceType; @@ -27,36 +26,34 @@ import java.util.List; import java.util.Map; -public abstract class ArrayValuesSourceParser implements Aggregator.Parser { +public abstract class ArrayValuesSourceParser implements Aggregator.Parser { - public abstract static class NumericValuesSourceParser extends ArrayValuesSourceParser { + public abstract static class NumericValuesSourceParser extends ArrayValuesSourceParser { protected NumericValuesSourceParser(boolean formattable) { - super(formattable, CoreValuesSourceType.NUMERIC, ValueType.NUMERIC); + super(formattable, CoreValuesSourceType.NUMERIC); } } - public abstract static class BytesValuesSourceParser extends ArrayValuesSourceParser { + public abstract static class BytesValuesSourceParser extends ArrayValuesSourceParser { protected BytesValuesSourceParser(boolean formattable) { - super(formattable, CoreValuesSourceType.KEYWORD, ValueType.STRING); + super(formattable, CoreValuesSourceType.KEYWORD); } } - public abstract static class GeoPointValuesSourceParser extends ArrayValuesSourceParser { + public abstract static class GeoPointValuesSourceParser extends ArrayValuesSourceParser { protected GeoPointValuesSourceParser(boolean formattable) { - super(formattable, CoreValuesSourceType.GEOPOINT, ValueType.GEOPOINT); + super(formattable, CoreValuesSourceType.GEOPOINT); } } - private boolean formattable = false; - private ValuesSourceType valuesSourceType = null; - private ValueType targetValueType = null; + private final boolean formattable; + private final ValuesSourceType valuesSourceType; - private ArrayValuesSourceParser(boolean formattable, ValuesSourceType valuesSourceType, ValueType targetValueType) { + private ArrayValuesSourceParser(boolean formattable, ValuesSourceType valuesSourceType) { this.valuesSourceType = valuesSourceType; - this.targetValueType = targetValueType; this.formattable = formattable; } @@ -159,12 +156,7 @@ public final ArrayValuesSourceAggregationBuilder parse(String aggregationName } } - ArrayValuesSourceAggregationBuilder factory = createFactory( - aggregationName, - this.valuesSourceType, - this.targetValueType, - otherOptions - ); + ArrayValuesSourceAggregationBuilder factory = createFactory(aggregationName, this.valuesSourceType, otherOptions); if (fields != null) { factory.fields(fields); } @@ -216,8 +208,6 @@ private void parseMissingAndAdd( * the name of the aggregation * @param valuesSourceType * the type of the {@link ValuesSource} - * @param targetValueType - * the target type of the final value output by the aggregation * @param otherOptions * a {@link Map} containing the extra options parsed by the * {@link #token(String, String, XContentParser.Token, XContentParser, Map)} @@ -227,14 +217,13 @@ private void parseMissingAndAdd( protected abstract ArrayValuesSourceAggregationBuilder createFactory( String aggregationName, ValuesSourceType 
valuesSourceType, - ValueType targetValueType, Map otherOptions ); /** * Allows subclasses of {@link ArrayValuesSourceParser} to parse extra * parameters and store them in a {@link Map} which will later be passed to - * {@link #createFactory(String, ValuesSourceType, ValueType, Map)}. + * {@link #createFactory(String, ValuesSourceType, Map)}. * * @param aggregationName * the name of the aggregation @@ -247,7 +236,7 @@ protected abstract ArrayValuesSourceAggregationBuilder createFactory( * @param otherOptions * a {@link Map} of options to be populated by successive calls * to this method which will then be passed to the - * {@link #createFactory(String, ValuesSourceType, ValueType, Map)} + * {@link #createFactory(String, ValuesSourceType, Map)} * method * @return true if the current token was correctly parsed, * false otherwise diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStats.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/InternalMatrixStats.java similarity index 93% rename from modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStats.java rename to modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/InternalMatrixStats.java index f3b1db353544..c7428393b275 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStats.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/InternalMatrixStats.java @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -package org.elasticsearch.search.aggregations.matrix.stats; +package org.elasticsearch.aggregations.metric; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -27,7 +27,7 @@ /** * Computes distribution statistics over multiple fields */ -public class InternalMatrixStats extends InternalAggregation implements MatrixStats { +public class InternalMatrixStats extends InternalAggregation { /** per shard stats needed to compute stats */ private final RunningStats stats; /** final result */ @@ -67,8 +67,7 @@ public String getWriteableName() { return MatrixStatsAggregationBuilder.NAME; } - /** get the number of documents */ - @Override + /** return the total document count */ public long getDocCount() { if (results != null) { return results.getDocCount(); @@ -79,8 +78,7 @@ public long getDocCount() { return stats.docCount; } - /** get the number of samples for the given field. 
== docCount - numMissing */ - @Override + /** return total field count (differs from docCount if there are missing values) */ public long getFieldCount(String field) { if (results == null) { return 0; @@ -88,8 +86,7 @@ public long getFieldCount(String field) { return results.getFieldCount(field); } - /** get the mean for the given field */ - @Override + /** return the field mean */ public double getMean(String field) { if (results == null) { return Double.NaN; @@ -97,8 +94,7 @@ public double getMean(String field) { return results.getMean(field); } - /** get the variance for the given field */ - @Override + /** return the field variance */ public double getVariance(String field) { if (results == null) { return Double.NaN; @@ -106,8 +102,7 @@ public double getVariance(String field) { return results.getVariance(field); } - /** get the distribution skewness for the given field */ - @Override + /** return the skewness of the distribution */ public double getSkewness(String field) { if (results == null) { return Double.NaN; @@ -115,8 +110,7 @@ public double getSkewness(String field) { return results.getSkewness(field); } - /** get the distribution shape for the given field */ - @Override + /** return the kurtosis of the distribution */ public double getKurtosis(String field) { if (results == null) { return Double.NaN; @@ -124,8 +118,7 @@ public double getKurtosis(String field) { return results.getKurtosis(field); } - /** get the covariance between the two fields */ - @Override + /** return the covariance between field x and field y */ public double getCovariance(String fieldX, String fieldY) { if (results == null) { return Double.NaN; @@ -133,8 +126,7 @@ public double getCovariance(String fieldX, String fieldY) { return results.getCovariance(fieldX, fieldY); } - /** get the correlation between the two fields */ - @Override + /** return the correlation coefficient of field x and field y */ public double getCorrelation(String fieldX, String fieldY) { if (results == null) { return Double.NaN; diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixAggregationInspectionHelper.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixAggregationInspectionHelper.java similarity index 91% rename from modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixAggregationInspectionHelper.java rename to modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixAggregationInspectionHelper.java index de1aff8913b3..6befc7e1c87e 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixAggregationInspectionHelper.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixAggregationInspectionHelper.java @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -package org.elasticsearch.search.aggregations.matrix.stats; +package org.elasticsearch.aggregations.metric; /** * Counterpart to {@link org.elasticsearch.search.aggregations.support.AggregationInspectionHelper}, providing diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregationBuilder.java similarity index 85% rename from modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregationBuilder.java rename to modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregationBuilder.java index c4c476ff65e9..d8edb19c2782 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregationBuilder.java @@ -5,16 +5,15 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -package org.elasticsearch.search.aggregations.matrix.stats; +package org.elasticsearch.aggregations.metric; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.matrix.ArrayValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.xcontent.ToXContent; @@ -56,11 +55,16 @@ public boolean supportsSampling() { */ public MatrixStatsAggregationBuilder(StreamInput in) throws IOException { super(in); + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + multiValueMode = MultiValueMode.readMultiValueModeFrom(in); + } } @Override - protected void innerWriteTo(StreamOutput out) { - // Do nothing, no extra state to write to stream + protected void innerWriteTo(StreamOutput out) throws IOException { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + multiValueMode.writeTo(out); + } } public MatrixStatsAggregationBuilder multiValueMode(MultiValueMode multiValueMode) { @@ -94,7 +98,7 @@ public String getType() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_EMPTY; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.ZERO; } } diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregator.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregator.java similarity index 94% rename from modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregator.java rename to modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregator.java index 6c3d41138ebc..c6ac2818f824 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregator.java +++ 
b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregator.java @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -package org.elasticsearch.search.aggregations.matrix.stats; +package org.elasticsearch.aggregations.metric; import org.apache.lucene.search.ScoreMode; import org.elasticsearch.common.util.ObjectArray; @@ -17,7 +17,6 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; -import org.elasticsearch.search.aggregations.matrix.ArrayValuesSource.NumericArrayValuesSource; import org.elasticsearch.search.aggregations.metrics.MetricsAggregator; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; @@ -30,7 +29,7 @@ **/ final class MatrixStatsAggregator extends MetricsAggregator { /** Multiple ValuesSource with field names */ - private final NumericArrayValuesSource valuesSources; + private final ArrayValuesSource.NumericArrayValuesSource valuesSources; /** array of descriptive stats, per shard, needed to compute the correlation */ ObjectArray stats; @@ -45,7 +44,7 @@ final class MatrixStatsAggregator extends MetricsAggregator { ) throws IOException { super(name, context, parent, metadata); if (valuesSources != null && valuesSources.isEmpty() == false) { - this.valuesSources = new NumericArrayValuesSource(valuesSources, multiValueMode); + this.valuesSources = new ArrayValuesSource.NumericArrayValuesSource(valuesSources, multiValueMode); stats = context.bigArrays().newObjectArray(1); } else { this.valuesSources = null; diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregatorFactory.java similarity index 95% rename from modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java rename to modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregatorFactory.java index 0e14bad7a909..2418aada6c26 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregatorFactory.java @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -package org.elasticsearch.search.aggregations.matrix.stats; +package org.elasticsearch.aggregations.metric; import org.elasticsearch.common.util.Maps; import org.elasticsearch.search.MultiValueMode; @@ -14,7 +14,6 @@ import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.CardinalityUpperBound; -import org.elasticsearch.search.aggregations.matrix.ArrayValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/spi/MatrixStatsNamedXContentProvider.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsNamedXContentProvider.java similarity index 84% rename from modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/spi/MatrixStatsNamedXContentProvider.java rename to modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsNamedXContentProvider.java index ff3e6e06e22b..66670a3f6902 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/spi/MatrixStatsNamedXContentProvider.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsNamedXContentProvider.java @@ -6,12 +6,10 @@ * Side Public License, v 1. */ -package org.elasticsearch.search.aggregations.matrix.spi; +package org.elasticsearch.aggregations.metric; import org.elasticsearch.plugins.spi.NamedXContentProvider; import org.elasticsearch.search.aggregations.Aggregation; -import org.elasticsearch.search.aggregations.matrix.stats.MatrixStatsAggregationBuilder; -import org.elasticsearch.search.aggregations.matrix.stats.ParsedMatrixStats; import org.elasticsearch.xcontent.ContextParser; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsParser.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsParser.java new file mode 100644 index 000000000000..edf35947f07c --- /dev/null +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsParser.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.aggregations.metric; + +import org.elasticsearch.search.MultiValueMode; +import org.elasticsearch.search.aggregations.support.ValuesSourceType; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Map; + +public class MatrixStatsParser extends ArrayValuesSourceParser.NumericValuesSourceParser { + + public MatrixStatsParser() { + super(true); + } + + @Override + protected boolean token( + String aggregationName, + String currentFieldName, + XContentParser.Token token, + XContentParser parser, + Map<ParseField, Object> otherOptions + ) throws IOException { + if (ArrayValuesSourceAggregationBuilder.MULTIVALUE_MODE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + if (token == XContentParser.Token.VALUE_STRING) { + otherOptions.put(ArrayValuesSourceAggregationBuilder.MULTIVALUE_MODE_FIELD, parser.text()); + return true; + } + } + return false; + } + + @Override + protected MatrixStatsAggregationBuilder createFactory( + String aggregationName, + ValuesSourceType valuesSourceType, + Map<ParseField, Object> otherOptions + ) { + MatrixStatsAggregationBuilder builder = new MatrixStatsAggregationBuilder(aggregationName); + String mode = (String) otherOptions.get(ArrayValuesSourceAggregationBuilder.MULTIVALUE_MODE_FIELD); + if (mode != null) { + builder.multiValueMode(MultiValueMode.fromString(mode)); + } + return builder; + } +} diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsResults.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsResults.java similarity index 99% rename from modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsResults.java rename to modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsResults.java index 45dbe94e9ce8..b5b3fad8253e 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsResults.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsResults.java @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -package org.elasticsearch.search.aggregations.matrix.stats; +package org.elasticsearch.aggregations.metric; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/ParsedMatrixStats.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ParsedMatrixStats.java similarity index 97% rename from modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/ParsedMatrixStats.java rename to modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ParsedMatrixStats.java index a89e6c7bbc30..0c44946ac96a 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/ParsedMatrixStats.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/ParsedMatrixStats.java @@ -6,7 +6,7 @@ * Side Public License, v 1.
*/ -package org.elasticsearch.search.aggregations.matrix.stats; +package org.elasticsearch.aggregations.metric; import org.elasticsearch.common.util.Maps; import org.elasticsearch.search.aggregations.ParsedAggregation; @@ -21,7 +21,7 @@ import java.util.Map; import java.util.Objects; -public class ParsedMatrixStats extends ParsedAggregation implements MatrixStats { +public class ParsedMatrixStats extends ParsedAggregation { private final Map counts = new LinkedHashMap<>(); private final Map means = new HashMap<>(); @@ -42,12 +42,10 @@ private void setDocCount(long docCount) { this.docCount = docCount; } - @Override public long getDocCount() { return docCount; } - @Override public long getFieldCount(String field) { if (counts.containsKey(field) == false) { return 0; @@ -55,27 +53,22 @@ public long getFieldCount(String field) { return counts.get(field); } - @Override public double getMean(String field) { return checkedGet(means, field); } - @Override public double getVariance(String field) { return checkedGet(variances, field); } - @Override public double getSkewness(String field) { return checkedGet(skewness, field); } - @Override public double getKurtosis(String field) { return checkedGet(kurtosis, field); } - @Override public double getCovariance(String fieldX, String fieldY) { if (fieldX.equals(fieldY)) { return checkedGet(variances, fieldX); @@ -83,7 +76,6 @@ public double getCovariance(String fieldX, String fieldY) { return MatrixStatsResults.getValFromUpperTriangularMatrix(covariances, fieldX, fieldY); } - @Override public double getCorrelation(String fieldX, String fieldY) { if (fieldX.equals(fieldY)) { return 1.0; diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStats.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/RunningStats.java similarity index 99% rename from modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStats.java rename to modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/RunningStats.java index 5ec08c68ed98..e4b8e15cd5e1 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStats.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/RunningStats.java @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -package org.elasticsearch.search.aggregations.matrix.stats; +package org.elasticsearch.aggregations.metric; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSelectorPipelineAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSelectorPipelineAggregationBuilder.java index 15bd818ca98b..2271bbe44d3f 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSelectorPipelineAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSelectorPipelineAggregationBuilder.java @@ -8,7 +8,7 @@ package org.elasticsearch.aggregations.pipeline; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -214,7 +214,7 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_EMPTY; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.ZERO; } } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilder.java index 2bd65c6b0a99..80f9c66814f6 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilder.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.aggregations.pipeline; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder; @@ -193,7 +193,7 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_EMPTY; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.ZERO; } } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/DerivativePipelineAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/DerivativePipelineAggregationBuilder.java index f4838441938e..32a8b5897ae8 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/DerivativePipelineAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/DerivativePipelineAggregationBuilder.java @@ -8,7 +8,7 @@ package org.elasticsearch.aggregations.pipeline; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; @@ -256,7 +256,7 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_7_4_0; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.V_7_4_0; } } diff --git 
a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregationBuilder.java index 8c721ae2e179..821869424c44 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregationBuilder.java @@ -8,7 +8,7 @@ package org.elasticsearch.aggregations.pipeline; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -239,7 +239,7 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_EMPTY; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.ZERO; } } diff --git a/modules/aggregations/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider b/modules/aggregations/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider new file mode 100644 index 000000000000..c44951e05c94 --- /dev/null +++ b/modules/aggregations/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider @@ -0,0 +1,9 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0 and the Server Side Public License, v 1; you may not use this file except +# in compliance with, at your election, the Elastic License 2.0 or the Server +# Side Public License, v 1. 
+# + +org.elasticsearch.aggregations.metric.MatrixStatsNamedXContentProvider diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java index a64d52b1ed7c..c44242556c4d 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.core.Strings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.BooleanFieldMapper; import org.elasticsearch.index.mapper.DateFieldMapper; @@ -859,7 +860,7 @@ public void testIntervalMinute() throws IOException { fullDocCount.clear(); fullDocCount.putAll(skeletonDocCount); for (int minute = 3; minute < 15; minute++) { - fullDocCount.put(formatted("2017-02-01T09:%02d:00.000Z", minute), 0); + fullDocCount.put(Strings.format("2017-02-01T09:%02d:00.000Z", minute), 0); } testSearchCase( DEFAULT_QUERY, diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java index 943e7f43c754..1ed6fa058e64 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/InternalTimeSeriesTests.java @@ -171,4 +171,9 @@ public void testReduceSimple() { assertThat(result.getBuckets().get(4).key.utf8ToString(), equalTo("9")); assertThat(result.getBuckets().get(4).getDocCount(), equalTo(9L)); } + + @Override + protected InternalTimeSeries mutateInstance(InternalTimeSeries instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } } diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregationBuilderTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregationBuilderTests.java index 2c87c9f298dc..eb738dc3fe82 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregationBuilderTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregationBuilderTests.java @@ -14,7 +14,8 @@ public class TimeSeriesAggregationBuilderTests extends AggregationBuilderTestCas @Override protected TimeSeriesAggregationBuilder createTestAggregatorBuilder() { - return new TimeSeriesAggregationBuilder(randomAlphaOfLength(10), randomBoolean()); + // Size set large enough tests not intending to hit the size limit shouldn't see it. 
+ return new TimeSeriesAggregationBuilder(randomAlphaOfLength(10), randomBoolean(), randomIntBetween(1000, 100_000)); } } diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java index b98e464daefb..20d2b45158b3 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregatorTests.java @@ -10,6 +10,7 @@ import org.apache.lucene.document.DoubleDocValuesField; import org.apache.lucene.document.FloatDocValuesField; +import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; @@ -26,6 +27,10 @@ import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper.TimeSeriesIdBuilder; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram; import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.support.ValuesSourceType; @@ -35,6 +40,7 @@ import java.util.function.Consumer; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -77,6 +83,7 @@ public void testStandAloneTimeSeriesWithSum() throws IOException { public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimensions, Object[] metrics) throws IOException { final List fields = new ArrayList<>(); fields.add(new SortedNumericDocValuesField(DataStreamTimestampFieldMapper.DEFAULT_PATH, timestamp)); + fields.add(new LongPoint(DataStreamTimestampFieldMapper.DEFAULT_PATH, timestamp)); final TimeSeriesIdBuilder builder = new TimeSeriesIdBuilder(null); for (int i = 0; i < dimensions.length; i += 2) { if (dimensions[i + 1]instanceof Number n) { @@ -99,6 +106,119 @@ public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimens iw.addDocument(fields); } + public void testWithDateHistogramExecutedAsFilterByFilterWithTimeSeriesIndexSearcher() throws IOException { + DateHistogramAggregationBuilder aggregationBuilder = new DateHistogramAggregationBuilder("by_timestamp").field("@timestamp") + .fixedInterval(DateHistogramInterval.HOUR) + .subAggregation(new TimeSeriesAggregationBuilder("ts").subAggregation(sum("sum").field("val1"))); + + // Before this threw a CollectionTerminatedException because FilterByFilterAggregation#getLeafCollector() always returns a + // LeafBucketCollector.NO_OP_COLLECTOR instance. And TimeSeriesIndexSearcher can't deal with this when initializing the + // leaf walkers. 
+ testCase(iw -> { + long startTime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2023-01-01T00:00:00Z"); + for (int i = 1; i <= 5000; i++) { + writeTS(iw, startTime++, new Object[] { "dim1", "aaa" }, new Object[] { "val1", 1 }); + } + }, internalAggregation -> { + InternalDateHistogram dateHistogram = (InternalDateHistogram) internalAggregation; + assertThat(dateHistogram.getBuckets(), hasSize(1)); + InternalTimeSeries timeSeries = dateHistogram.getBuckets().get(0).getAggregations().get("ts"); + assertThat(timeSeries.getBuckets(), hasSize(1)); + Sum sum = timeSeries.getBuckets().get(0).getAggregations().get("sum"); + assertThat(sum.value(), equalTo(5000.0)); + }, + new AggTestConfig( + aggregationBuilder, + TimeSeriesIdFieldMapper.FIELD_TYPE, + new DateFieldMapper.DateFieldType("@timestamp"), + new KeywordFieldMapper.KeywordFieldType("dim1"), + new NumberFieldMapper.NumberFieldType("val1", NumberFieldMapper.NumberType.INTEGER) + ).withQuery(new MatchAllDocsQuery()) + ); + } + + public void testMultiBucketAggregationAsSubAggregation() throws IOException { + long startTime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2023-01-01T00:00:00Z"); + CheckedConsumer<RandomIndexWriter, IOException> buildIndex = iw -> { + writeTS(iw, startTime + 1, new Object[] { "dim1", "aaa", "dim2", "xxx" }, new Object[] {}); + writeTS(iw, startTime + 2, new Object[] { "dim1", "aaa", "dim2", "yyy" }, new Object[] {}); + writeTS(iw, startTime + 3, new Object[] { "dim1", "bbb", "dim2", "zzz" }, new Object[] {}); + writeTS(iw, startTime + 4, new Object[] { "dim1", "bbb", "dim2", "zzz" }, new Object[] {}); + writeTS(iw, startTime + 5, new Object[] { "dim1", "aaa", "dim2", "xxx" }, new Object[] {}); + writeTS(iw, startTime + 6, new Object[] { "dim1", "aaa", "dim2", "yyy" }, new Object[] {}); + writeTS(iw, startTime + 7, new Object[] { "dim1", "bbb", "dim2", "zzz" }, new Object[] {}); + writeTS(iw, startTime + 8, new Object[] { "dim1", "bbb", "dim2", "zzz" }, new Object[] {}); + }; + Consumer<InternalTimeSeries> verifier = ts -> { + assertThat(ts.getBuckets(), hasSize(3)); + + assertThat(ts.getBucketByKey("{dim1=aaa, dim2=xxx}").docCount, equalTo(2L)); + InternalDateHistogram byTimeStampBucket = ts.getBucketByKey("{dim1=aaa, dim2=xxx}").getAggregations().get("by_timestamp"); + assertThat( + byTimeStampBucket.getBuckets(), + contains(new InternalDateHistogram.Bucket(startTime, 2, false, null, InternalAggregations.EMPTY)) + ); + assertThat(ts.getBucketByKey("{dim1=aaa, dim2=yyy}").docCount, equalTo(2L)); + byTimeStampBucket = ts.getBucketByKey("{dim1=aaa, dim2=yyy}").getAggregations().get("by_timestamp"); + assertThat( + byTimeStampBucket.getBuckets(), + contains(new InternalDateHistogram.Bucket(startTime, 2, false, null, InternalAggregations.EMPTY)) + ); + assertThat(ts.getBucketByKey("{dim1=bbb, dim2=zzz}").docCount, equalTo(4L)); + byTimeStampBucket = ts.getBucketByKey("{dim1=bbb, dim2=zzz}").getAggregations().get("by_timestamp"); + assertThat( + byTimeStampBucket.getBuckets(), + contains(new InternalDateHistogram.Bucket(startTime, 4, false, null, InternalAggregations.EMPTY)) + ); + }; + + DateHistogramAggregationBuilder dateBuilder = new DateHistogramAggregationBuilder("by_timestamp"); + dateBuilder.field("@timestamp"); + dateBuilder.fixedInterval(DateHistogramInterval.seconds(1)); + TimeSeriesAggregationBuilder tsBuilder = new TimeSeriesAggregationBuilder("by_tsid"); + tsBuilder.subAggregation(dateBuilder); + timeSeriesTestCase(tsBuilder, new MatchAllDocsQuery(), buildIndex, verifier); + } + + public void testAggregationSize() throws
IOException { + CheckedConsumer<RandomIndexWriter, IOException> buildIndex = multiTsWriter(); + + List<Consumer<InternalTimeSeries>> verifiers = new ArrayList<Consumer<InternalTimeSeries>>(); + + verifiers.add(ts -> assertThat(ts.getBucketByKey("{dim1=aaa, dim2=xxx}").docCount, equalTo(2L))); + verifiers.add(ts -> assertThat(ts.getBucketByKey("{dim1=aaa, dim2=yyy}").docCount, equalTo(2L))); + verifiers.add(ts -> assertThat(ts.getBucketByKey("{dim1=bbb, dim2=zzz}").docCount, equalTo(2L))); + + for (int i = 1; i < 3; i++) { + int size = i; + Consumer<InternalTimeSeries> limitedVerifier = ts -> { + assertThat(ts.getBuckets(), hasSize(size)); + + for (int j = 0; j < size; j++) { + verifiers.get(j).accept(ts); + } + }; + + TimeSeriesAggregationBuilder limitedTsBuilder = new TimeSeriesAggregationBuilder("by_tsid"); + limitedTsBuilder.setSize(i); + timeSeriesTestCase(limitedTsBuilder, new MatchAllDocsQuery(), buildIndex, limitedVerifier); + } + } + + private CheckedConsumer<RandomIndexWriter, IOException> multiTsWriter() { + long startTime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2023-01-01T00:00:00Z"); + return iw -> { + writeTS(iw, startTime + 1, new Object[] { "dim1", "aaa", "dim2", "xxx" }, new Object[] { "val1", 1 }); + writeTS(iw, startTime + 2, new Object[] { "dim1", "aaa", "dim2", "yyy" }, new Object[] { "val1", 2 }); + writeTS(iw, startTime + 3, new Object[] { "dim1", "bbb", "dim2", "zzz" }, new Object[] { "val1", 3 }); + writeTS(iw, startTime + 4, new Object[] { "dim1", "bbb", "dim2", "zzz" }, new Object[] { "val1", 4 }); + writeTS(iw, startTime + 5, new Object[] { "dim1", "aaa", "dim2", "xxx" }, new Object[] { "val1", 5 }); + writeTS(iw, startTime + 6, new Object[] { "dim1", "aaa", "dim2", "yyy" }, new Object[] { "val1", 6 }); + writeTS(iw, startTime + 7, new Object[] { "dim1", "bbb", "dim2", "zzz" }, new Object[] { "val1", 7 }); + writeTS(iw, startTime + 8, new Object[] { "dim1", "bbb", "dim2", "zzz" }, new Object[] { "val1", 8 }); + }; + } + private void timeSeriesTestCase( TimeSeriesAggregationBuilder builder, Query query, diff --git a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/BaseMatrixStatsTestCase.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/BaseMatrixStatsTestCase.java similarity index 95% rename from modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/BaseMatrixStatsTestCase.java rename to modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/BaseMatrixStatsTestCase.java index 2e46dd436fa5..9cbebb4dab1e 100644 --- a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/BaseMatrixStatsTestCase.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/BaseMatrixStatsTestCase.java @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1.
*/ -package org.elasticsearch.search.aggregations.matrix.stats; +package org.elasticsearch.aggregations.metric; import org.elasticsearch.test.ESTestCase; import org.junit.Before; diff --git a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/InternalMatrixStatsTests.java similarity index 83% rename from modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java rename to modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/InternalMatrixStatsTests.java index 0dfbf854bc00..9ca315b5a7b7 100644 --- a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/InternalMatrixStatsTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/InternalMatrixStatsTests.java @@ -5,8 +5,10 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -package org.elasticsearch.search.aggregations.matrix.stats; +package org.elasticsearch.aggregations.metric; +import org.elasticsearch.aggregations.AggregationsPlugin; +import org.elasticsearch.aggregations.metric.InternalMatrixStats.Fields; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.Maps; @@ -20,8 +22,6 @@ import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.ParsedAggregation; -import org.elasticsearch.search.aggregations.matrix.MatrixAggregationPlugin; -import org.elasticsearch.search.aggregations.matrix.stats.InternalMatrixStats.Fields; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator.PipelineTree; import org.elasticsearch.test.InternalAggregationTestCase; import org.elasticsearch.xcontent.ContextParser; @@ -46,7 +46,7 @@ public class InternalMatrixStatsTests extends InternalAggregationTestCase randomAlphaOfLength(3)); + String unknownField = randomAlphaOfLength(3); + String other = randomValueOtherThan(unknownField, () -> randomAlphaOfLength(3)); + // getFieldCount returns 0 for unknown fields + assertEquals(0.0, actual.getFieldCount(unknownField), 0.0); - for (MatrixStats matrix : Arrays.asList(actual)) { + expectThrows(IllegalArgumentException.class, () -> actual.getMean(unknownField)); + expectThrows(IllegalArgumentException.class, () -> actual.getVariance(unknownField)); + expectThrows(IllegalArgumentException.class, () -> actual.getSkewness(unknownField)); + expectThrows(IllegalArgumentException.class, () -> actual.getKurtosis(unknownField)); - // getFieldCount returns 0 for unknown fields - assertEquals(0.0, matrix.getFieldCount(unknownField), 0.0); + expectThrows(IllegalArgumentException.class, () -> actual.getCovariance(unknownField, unknownField)); + expectThrows(IllegalArgumentException.class, () -> actual.getCovariance(unknownField, other)); + expectThrows(IllegalArgumentException.class, () -> actual.getCovariance(other, unknownField)); - expectThrows(IllegalArgumentException.class, () -> matrix.getMean(unknownField)); - expectThrows(IllegalArgumentException.class, () -> matrix.getVariance(unknownField)); - expectThrows(IllegalArgumentException.class, () -> matrix.getSkewness(unknownField)); - expectThrows(IllegalArgumentException.class, () -> 
matrix.getKurtosis(unknownField)); - - expectThrows(IllegalArgumentException.class, () -> matrix.getCovariance(unknownField, unknownField)); - expectThrows(IllegalArgumentException.class, () -> matrix.getCovariance(unknownField, other)); - expectThrows(IllegalArgumentException.class, () -> matrix.getCovariance(other, unknownField)); - - assertEquals(1.0, matrix.getCorrelation(unknownField, unknownField), 0.0); - expectThrows(IllegalArgumentException.class, () -> matrix.getCorrelation(unknownField, other)); - expectThrows(IllegalArgumentException.class, () -> matrix.getCorrelation(other, unknownField)); - } + assertEquals(1.0, actual.getCorrelation(unknownField, unknownField), 0.0); + expectThrows(IllegalArgumentException.class, () -> actual.getCorrelation(unknownField, other)); + expectThrows(IllegalArgumentException.class, () -> actual.getCorrelation(other, unknownField)); } @Override diff --git a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregatorTests.java similarity index 91% rename from modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java rename to modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregatorTests.java index d40aa1de5385..92dfb03ff301 100644 --- a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregatorTests.java @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -package org.elasticsearch.search.aggregations.matrix.stats; +package org.elasticsearch.aggregations.metric; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -16,17 +16,14 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.aggregations.bucket.AggregationTestCase; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; -import org.elasticsearch.plugins.SearchPlugin; -import org.elasticsearch.search.aggregations.AggregatorTestCase; -import org.elasticsearch.search.aggregations.matrix.MatrixAggregationPlugin; import java.util.Arrays; import java.util.Collections; -import java.util.List; -public class MatrixStatsAggregatorTests extends AggregatorTestCase { +public class MatrixStatsAggregatorTests extends AggregationTestCase { public void testNoData() throws Exception { MappedFieldType ft = new NumberFieldMapper.NumberFieldType("field", NumberFieldMapper.NumberType.DOUBLE); @@ -100,9 +97,4 @@ public void testTwoFields() throws Exception { } } } - - @Override - protected List getSearchPlugins() { - return Collections.singletonList(new MatrixAggregationPlugin()); - } } diff --git a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MultiPassStats.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/MultiPassStats.java similarity index 99% rename from modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MultiPassStats.java rename to modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/MultiPassStats.java index 0bc3685f8cf8..6a43f02697e2 100644 --- a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MultiPassStats.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/MultiPassStats.java @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -package org.elasticsearch.search.aggregations.matrix.stats; +package org.elasticsearch.aggregations.metric; import org.elasticsearch.common.util.Maps; diff --git a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStatsTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/RunningStatsTests.java similarity index 98% rename from modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStatsTests.java rename to modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/RunningStatsTests.java index 36d29be98c35..76eda2a8dca1 100644 --- a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/RunningStatsTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/metric/RunningStatsTests.java @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -package org.elasticsearch.search.aggregations.matrix.stats; +package org.elasticsearch.aggregations.metric; import java.util.Arrays; import java.util.HashSet; diff --git a/modules/aggregations/src/yamlRestTest/java/org/elasticsearch/aggregations/AggregationsClientYamlTestSuiteIT.java b/modules/aggregations/src/yamlRestTest/java/org/elasticsearch/aggregations/AggregationsClientYamlTestSuiteIT.java index a0df2af9d59e..a3c737e2795d 100644 --- a/modules/aggregations/src/yamlRestTest/java/org/elasticsearch/aggregations/AggregationsClientYamlTestSuiteIT.java +++ b/modules/aggregations/src/yamlRestTest/java/org/elasticsearch/aggregations/AggregationsClientYamlTestSuiteIT.java @@ -10,10 +10,20 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; public class AggregationsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("aggregations") + .module("lang-painless") + .feature(FeatureFlag.TIME_SERIES_MODE) + .build(); + public AggregationsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { super(testCandidate); } @@ -22,4 +32,9 @@ public AggregationsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate t public static Iterable parameters() throws Exception { return ESClientYamlSuiteTestCase.createParameters(); } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } } diff --git a/modules/aggs-matrix-stats/src/yamlRestTest/resources/rest-api-spec/test/stats/20_empty_bucket.yml b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/matrix_stats.yml similarity index 100% rename from modules/aggs-matrix-stats/src/yamlRestTest/resources/rest-api-spec/test/stats/20_empty_bucket.yml rename to modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/matrix_stats.yml diff --git a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/matrix_stats_multi_value_field.yml b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/matrix_stats_multi_value_field.yml new file mode 100644 index 000000000000..2e2b9a94ee46 --- /dev/null +++ b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/matrix_stats_multi_value_field.yml @@ -0,0 +1,200 @@ +--- +setup: + - skip: + version: " - 8.6.99" + reason: serialization bug fixed in 8.7.0 + + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 3 + number_of_routing_shards: 3 + mappings: + "properties": + "val1": + "type": "double" + "val2": + "type": "double" + "val3": + "type": "double" + + - do: + indices.create: + index: unmapped + body: + settings: + number_of_shards: 3 + + - do: + index: + index: test + id: "1" + body: { "val1": 1.9, "val2": 3.1, "val3": 2.3, "vals" : [1.9, 16.143] } + - do: + index: + index: test + id: "2" + body: { "val1": -5.2, "val2": -3.4, "val3": 2.3, "vals" : [155, 16.23]} + - do: + index: + index: test + id: "3" + body: { "val1": -5.2, "val3": 2.3, "vals" : [-455, -32.32]} + - do: + index: + index: test + id: "4" + body: { "val1": 18.3, "val2": 104.4, "val3": 2.3, 
"vals" : [0.14, 92.1]} + - do: + index: + index: test + id: "5" + body: { "val1": -53.2, "val2": -322.4, "val3": 2.3, "vals" : [16, 16]} + - do: + index: + index: test + id: "6" + body: { "val1": -578.9, "val2": 69.9, "val3": 2.3} + - do: + index: + index: test + id: "7" + body: { "val1": 16.2, "val2": 17.2, "val3": 2.3, "vals" : [1234.3, -3433]} + - do: + index: + index: test + id: "8" + body: { "val1": -4222.63, "val2": 316.44, "val3": 2.3, "vals" : [177.2, -93.333]} + - do: + index: + index: test + id: "9" + body: { "val1": -59999.55, "val2": -3163.4, "val3": 2.3, "vals" : [-29.9, 163.0]} + - do: + index: + index: test + id: "10" + body: { "val1": 782.7, "val2": 789.7, "val3": 2.3, "vals" : [-0.2, 1343.3]} + - do: + index: + index: test + id: "11" + body: { "val1": -1.2, "val2": 6.3, "val3": 2.3, "vals" : [15.3, 16.9]} + - do: + index: + index: test + id: "12" + body: { "val1": 0, "val2": 1.11, "val3": 2.3, "vals" : [-644.4, -644.4]} + - do: + index: + index: test + id: "13" + body: { "val1": 0.1, "val2": 0.92, "val3": 2.3, "vals" : [73.2, 0.12]} + - do: + index: + index: test + id: "14" + body: { "val1": 0.12, "val2": -82.4, "val3": 2.3, "vals" : [-0.001, 1295.3]} + - do: + index: + index: test + id: "15" + body: { "val1": 98.2, "val2": 32.4, "val3": 2.3, "vals" : [15.5, 16.5]} + + - do: + indices.refresh: + index: [test, unmapped] + + - do: + cluster.health: + wait_for_status: yellow + +--- +"Unmapped": + + - do: + search: + rest_total_hits_as_int: true + index: unmapped + body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "vals"]} } } } + + - match: {hits.total: 0} + - match: {aggregations.mfs.doc_count: 0} + +--- +"Multi value field Max": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "vals"], "mode" : "max"} } } } + + - match: {hits.total: 15} + - match: {aggregations.mfs.doc_count: 14} + - match: {aggregations.mfs.fields.0.count: 14} + - match: {aggregations.mfs.fields.0.correlation.val1: 0.06838646533369998} + +--- +"Multi value field Min": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "vals"], "mode" : "min"} } } } + + - match: {hits.total: 15} + - match: {aggregations.mfs.doc_count: 14} + - match: {aggregations.mfs.fields.0.count: 14} + - match: {aggregations.mfs.fields.0.correlation.val1: -0.09777682707831963} + +--- +"Partially unmapped": + + - do: + search: + rest_total_hits_as_int: true + index: [test, unmapped] + body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "vals"]} } } } + + - match: {hits.total: 15} + - match: {aggregations.mfs.doc_count: 13} + - match: {aggregations.mfs.fields.0.count: 13} + - match: {aggregations.mfs.fields.0.correlation.val1: -0.044997535185684244} + +--- +"Partially unmapped with missing defaults": + + - do: + search: + rest_total_hits_as_int: true + index: [test, unmapped] + body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "vals"], "missing" : {"val2" : 10, "vals" : 5 } } } } } + + - match: {hits.total: 15} + - match: {aggregations.mfs.doc_count: 15} + - match: {aggregations.mfs.fields.0.count: 15} + - match: {aggregations.mfs.fields.0.correlation.val2: 0.04028024709708195} + +--- +"With script": + + - do: + catch: /parsing_exception/ + search: + rest_total_hits_as_int: true + index: test + body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["vals", "val3"], "script" : { "my_script" : {"source" : "1 + 
doc['val1'].value", "lang" : "js"} } } } } } + +--- +"With script params": + + - do: + catch: /parsing_exception/ + search: + rest_total_hits_as_int: true + index: test + body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val3", "vals"], "script" : { "my_script" : {"source" : "my_var + doc['val1'].value", "params" : { "my_var" : 1 }, "lang" : "js" } } } } } } diff --git a/modules/aggs-matrix-stats/src/yamlRestTest/resources/rest-api-spec/test/stats/30_single_value_field.yml b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/matrix_stats_single_value_field.yml similarity index 100% rename from modules/aggs-matrix-stats/src/yamlRestTest/resources/rest-api-spec/test/stats/30_single_value_field.yml rename to modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/matrix_stats_single_value_field.yml diff --git a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/max_metric.yml b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/max_metric.yml index 551e0c01d128..0163df5bbaeb 100644 --- a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/max_metric.yml +++ b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/max_metric.yml @@ -221,3 +221,45 @@ setup: - match: { aggregations.date_field_max.value: 1619827200000 } - match: { aggregations.date_field_max.value_as_string: "2021-05-01T00:00:00.000Z" } + +--- +"Counter field": + - skip: + version: " - 8.6.99" + reason: "counter field support added in 8.7" + + - do: + indices.create: + index: myindex + body: + mappings: + properties: + counter_field: + type : long + time_series_metric: counter + + - do: + bulk: + refresh: true + body: + - index: + _index: myindex + _id: "1" + - counter_field: 2 + - index: + _index: myindex + _id: "2" + - counter_field: 4 + - do: + search: + rest_total_hits_as_int: true + index: myindex + body: + aggs: + the_counter_max: + max: + field: counter_field + + - match: { hits.total: 2 } + - length: { hits.hits: 2 } + - match: { aggregations.the_counter_max.value: 4 } diff --git a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/min_metric.yml b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/min_metric.yml index 30c226f554c3..690b3d240096 100644 --- a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/min_metric.yml +++ b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/min_metric.yml @@ -175,3 +175,45 @@ setup: the_string_min: min: field: string_field + +--- +"Counter field": + - skip: + version: " - 8.6.99" + reason: "counter field support added in 8.7" + + - do: + indices.create: + index: myindex + body: + mappings: + properties: + counter_field: + type : long + time_series_metric: counter + + - do: + bulk: + refresh: true + body: + - index: + _index: myindex + _id: "1" + - counter_field: 2 + - index: + _index: myindex + _id: "2" + - counter_field: 4 + - do: + search: + rest_total_hits_as_int: true + index: myindex + body: + aggs: + the_counter_min: + min: + field: counter_field + + - match: { hits.total: 2 } + - length: { hits.hits: 2 } + - match: { aggregations.the_counter_min.value: 2 } diff --git a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/time_series.yml b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/time_series.yml index 
8a8ee0a0f0e0..01829a5c1204 100644 --- a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/time_series.yml +++ b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/time_series.yml @@ -54,7 +54,7 @@ setup: --- "Basic test": - skip: - version: " - 8.5.99" + version: " - 8.6.99" reason: Time series result serialization changed in 8.6.0 - do: @@ -74,8 +74,140 @@ setup: - match: { hits.total.value: 1 } - - length: { aggregations.ts.buckets: 1 } + - length: { aggregations: 1 } - match: { aggregations.ts.buckets.0.key: { "key": "foo" } } - match: { aggregations.ts.buckets.0.doc_count: 1 } +--- +"Size test": + - skip: + version: " - 8.6.99" + reason: Size added in 8.7.0 + + - do: + search: + index: tsdb + body: + query: + range: + "@timestamp": + gte: "2019-01-01T00:10:00Z" + size: 0 + aggs: + ts: + time_series: + keyed: false + size: 1 + + - length: { aggregations.ts.buckets: 1 } + - match: { aggregations.ts.buckets.0.key: { "key": "bar" } } + + - do: + search: + index: tsdb + body: + query: + range: + "@timestamp": + gte: "2019-01-01T00:10:00Z" + size: 0 + aggs: + ts: + time_series: + keyed: false + size: 3 + + - length: { aggregations.ts.buckets: 3 } + - match: { aggregations.ts.buckets.0.key: { "key": "bar" } } + - match: { aggregations.ts.buckets.1.key: { "key": "baz" } } + - match: { aggregations.ts.buckets.2.key: { "key": "foo" } } + +--- +"Sampler aggregation with nested time series aggregation failure": + - skip: + version: " - 8.6.99" + reason: "Handling for time series aggregation failures introduced in 8.7.0" + + - do: + catch: '/\[random_sampler\] aggregation \[sample\] does not support sampling \[time_series\] aggregation \[ts\]/' + search: + index: tsdb + body: + aggs: + sample: + random_sampler: + probability: 1.0 + aggs: + by_timestamp: + date_histogram: + field: "@timestamp" + fixed_interval: 1h + aggs: + ts: + time_series: + keyed: false + aggs: + sum: + sum: + field: val + +--- +"Composite aggregation with nested time series aggregation failure": + - skip: + version: " - 8.6.99" + reason: "Handling for time series aggregation failures introduced in 8.7.0" + + - do: + catch: '/\[composite\] aggregation is incompatible with time series execution mode/' + search: + index: tsdb + body: + aggs: + by_key: + composite: + sources: [ + { + "key": { + "terms": { + "field": "key" + } + } + } + ] + aggs: + date: + date_histogram: + field: "@timestamp" + fixed_interval: "1h" + aggs: + ts: + time_series: + keyed: false + aggs: + sum: + sum: + field: val + +--- +"Global aggregation with nested time series aggregation failure": + - skip: + version: " - 8.6.99" + reason: "Handling for time series aggregation failures introduced in 8.7.0" + + - do: + catch: '/Time series aggregations cannot be used inside global aggregation./' + search: + index: tsdb + body: + aggs: + global: + global: {} + aggs: + ts: + time_series: + keyed: false + aggs: + sum: + sum: + field: val diff --git a/modules/aggs-matrix-stats/build.gradle b/modules/aggs-matrix-stats/build.gradle deleted file mode 100644 index e0621544c446..000000000000 --- a/modules/aggs-matrix-stats/build.gradle +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' - -esplugin { - description 'Adds aggregations whose input are a list of numeric fields and output includes a matrix.' - classname 'org.elasticsearch.search.aggregations.matrix.MatrixAggregationPlugin' -} - -restResources { - restApi { - include '_common', 'indices', 'cluster', 'index', 'search', 'nodes' - } -} diff --git a/modules/aggs-matrix-stats/src/main/java/module-info.java b/modules/aggs-matrix-stats/src/main/java/module-info.java deleted file mode 100644 index fa711a3b2b05..000000000000 --- a/modules/aggs-matrix-stats/src/main/java/module-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -module org.elasticsearch.aggs.matrix { - requires org.elasticsearch.base; - requires org.elasticsearch.server; - requires org.elasticsearch.xcontent; - requires org.apache.lucene.core; - - exports org.elasticsearch.search.aggregations.matrix; - exports org.elasticsearch.search.aggregations.matrix.stats; - - provides org.elasticsearch.plugins.spi.NamedXContentProvider - with - org.elasticsearch.search.aggregations.matrix.spi.MatrixStatsNamedXContentProvider; -} diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/MatrixAggregationPlugin.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/MatrixAggregationPlugin.java deleted file mode 100644 index 57ff7d227cee..000000000000 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/MatrixAggregationPlugin.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.search.aggregations.matrix; - -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.SearchPlugin; -import org.elasticsearch.search.aggregations.matrix.stats.InternalMatrixStats; -import org.elasticsearch.search.aggregations.matrix.stats.MatrixStatsAggregationBuilder; -import org.elasticsearch.search.aggregations.matrix.stats.MatrixStatsParser; - -import java.util.List; - -import static java.util.Collections.singletonList; - -public class MatrixAggregationPlugin extends Plugin implements SearchPlugin { - @Override - public List getAggregations() { - return singletonList( - new AggregationSpec(MatrixStatsAggregationBuilder.NAME, MatrixStatsAggregationBuilder::new, new MatrixStatsParser()) - .addResultReader(InternalMatrixStats::new) - ); - } -} diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/MatrixStatsAggregationBuilders.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/MatrixStatsAggregationBuilders.java deleted file mode 100644 index 735a839bf357..000000000000 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/MatrixStatsAggregationBuilders.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.search.aggregations.matrix; - -import org.elasticsearch.search.aggregations.matrix.stats.MatrixStats; -import org.elasticsearch.search.aggregations.matrix.stats.MatrixStatsAggregationBuilder; - -public class MatrixStatsAggregationBuilders { - /** - * Create a new {@link MatrixStats} aggregation with the given name. - */ - public static MatrixStatsAggregationBuilder matrixStats(String name) { - return new MatrixStatsAggregationBuilder(name); - } -} diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStats.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStats.java deleted file mode 100644 index b423fa2e5caf..000000000000 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStats.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.search.aggregations.matrix.stats; - -import org.elasticsearch.search.aggregations.Aggregation; - -/** - * Interface for MatrixStats Metric Aggregation - */ -public interface MatrixStats extends Aggregation { - /** return the total document count */ - long getDocCount(); - - /** return total field count (differs from docCount if there are missing values) */ - long getFieldCount(String field); - - /** return the field mean */ - double getMean(String field); - - /** return the field variance */ - double getVariance(String field); - - /** return the skewness of the distribution */ - double getSkewness(String field); - - /** return the kurtosis of the distribution */ - double getKurtosis(String field); - - /** return the covariance between field x and field y */ - double getCovariance(String fieldX, String fieldY); - - /** return the correlation coefficient of field x and field y */ - double getCorrelation(String fieldX, String fieldY); -} diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsParser.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsParser.java deleted file mode 100644 index c42cea2778b0..000000000000 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsParser.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.search.aggregations.matrix.stats; - -import org.elasticsearch.search.MultiValueMode; -import org.elasticsearch.search.aggregations.matrix.ArrayValuesSourceParser.NumericValuesSourceParser; -import org.elasticsearch.search.aggregations.support.ValueType; -import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.Map; - -import static org.elasticsearch.search.aggregations.matrix.ArrayValuesSourceAggregationBuilder.MULTIVALUE_MODE_FIELD; - -public class MatrixStatsParser extends NumericValuesSourceParser { - - public MatrixStatsParser() { - super(true); - } - - @Override - protected boolean token( - String aggregationName, - String currentFieldName, - XContentParser.Token token, - XContentParser parser, - Map otherOptions - ) throws IOException { - if (MULTIVALUE_MODE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - if (token == XContentParser.Token.VALUE_STRING) { - otherOptions.put(MULTIVALUE_MODE_FIELD, parser.text()); - return true; - } - } - return false; - } - - @Override - protected MatrixStatsAggregationBuilder createFactory( - String aggregationName, - ValuesSourceType valuesSourceType, - ValueType targetValueType, - Map otherOptions - ) { - MatrixStatsAggregationBuilder builder = new MatrixStatsAggregationBuilder(aggregationName); - String mode = (String) otherOptions.get(MULTIVALUE_MODE_FIELD); - if (mode != null) { - builder.multiValueMode(MultiValueMode.fromString(mode)); - } - return builder; - } -} diff --git a/modules/aggs-matrix-stats/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider b/modules/aggs-matrix-stats/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider deleted file mode 100644 index a2d706a39a60..000000000000 --- a/modules/aggs-matrix-stats/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider +++ /dev/null @@ -1 +0,0 @@ -org.elasticsearch.search.aggregations.matrix.spi.MatrixStatsNamedXContentProvider \ No newline at end of file diff --git a/modules/aggs-matrix-stats/src/yamlRestTest/java/org/elasticsearch/search/aggregations/matrix/MatrixStatsClientYamlTestSuiteIT.java b/modules/aggs-matrix-stats/src/yamlRestTest/java/org/elasticsearch/search/aggregations/matrix/MatrixStatsClientYamlTestSuiteIT.java deleted file mode 100644 index 6f29a3fb765f..000000000000 --- a/modules/aggs-matrix-stats/src/yamlRestTest/java/org/elasticsearch/search/aggregations/matrix/MatrixStatsClientYamlTestSuiteIT.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.search.aggregations.matrix; - -import com.carrotsearch.randomizedtesting.annotations.Name; -import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - -import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; -import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; - -public class MatrixStatsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { - public MatrixStatsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { - super(testCandidate); - } - - @ParametersFactory - public static Iterable parameters() throws Exception { - return ESClientYamlSuiteTestCase.createParameters(); - } -} diff --git a/modules/aggs-matrix-stats/src/yamlRestTest/resources/rest-api-spec/test/stats/10_basic.yml b/modules/aggs-matrix-stats/src/yamlRestTest/resources/rest-api-spec/test/stats/10_basic.yml deleted file mode 100644 index 2416d2b2b314..000000000000 --- a/modules/aggs-matrix-stats/src/yamlRestTest/resources/rest-api-spec/test/stats/10_basic.yml +++ /dev/null @@ -1,16 +0,0 @@ -# Integration tests for Matrix Aggs Plugin -# -"Matrix stats aggs loaded": - - skip: - reason: "contains is a newly added assertion" - features: contains - - do: - cluster.state: {} - - # Get master node id - - set: { master_node: master } - - - do: - nodes.info: {} - - - contains: { nodes.$master.modules: { name: aggs-matrix-stats } } diff --git a/modules/aggs-matrix-stats/src/yamlRestTest/resources/rest-api-spec/test/stats/40_multi_value_field.yml b/modules/aggs-matrix-stats/src/yamlRestTest/resources/rest-api-spec/test/stats/40_multi_value_field.yml deleted file mode 100644 index 295ac2160f23..000000000000 --- a/modules/aggs-matrix-stats/src/yamlRestTest/resources/rest-api-spec/test/stats/40_multi_value_field.yml +++ /dev/null @@ -1,197 +0,0 @@ ---- -setup: - - - do: - indices.create: - index: test - body: - settings: - number_of_shards: 3 - number_of_routing_shards: 3 - mappings: - "properties": - "val1": - "type": "double" - "val2": - "type": "double" - "val3": - "type": "double" - - - do: - indices.create: - index: unmapped - body: - settings: - number_of_shards: 3 - - - do: - index: - index: test - id: "1" - body: { "val1": 1.9, "val2": 3.1, "val3": 2.3, "vals" : [1.9, 16.143] } - - do: - index: - index: test - id: "2" - body: { "val1": -5.2, "val2": -3.4, "val3": 2.3, "vals" : [155, 16.23]} - - do: - index: - index: test - id: "3" - body: { "val1": -5.2, "val3": 2.3, "vals" : [-455, -32.32]} - - do: - index: - index: test - id: "4" - body: { "val1": 18.3, "val2": 104.4, "val3": 2.3, "vals" : [0.14, 92.1]} - - do: - index: - index: test - id: "5" - body: { "val1": -53.2, "val2": -322.4, "val3": 2.3, "vals" : [16, 16]} - - do: - index: - index: test - id: "6" - body: { "val1": -578.9, "val2": 69.9, "val3": 2.3} - - do: - index: - index: test - id: "7" - body: { "val1": 16.2, "val2": 17.2, "val3": 2.3, "vals" : [1234.3, -3433]} - - do: - index: - index: test - id: "8" - body: { "val1": -4222.63, "val2": 316.44, "val3": 2.3, "vals" : [177.2, -93.333]} - - do: - index: - index: test - id: "9" - body: { "val1": -59999.55, "val2": -3163.4, "val3": 2.3, "vals" : [-29.9, 163.0]} - - do: - index: - index: test - id: "10" - body: { "val1": 782.7, "val2": 789.7, "val3": 2.3, "vals" : [-0.2, 1343.3]} - - do: - index: - index: test - id: "11" - body: { "val1": -1.2, "val2": 6.3, "val3": 2.3, "vals" : [15.3, 16.9]} - - do: - index: - index: test - id: "12" - body: { "val1": 0, "val2": 1.11, "val3": 2.3, "vals" : [-644.4, -644.4]} - - do: - index: - 
index: test - id: "13" - body: { "val1": 0.1, "val2": 0.92, "val3": 2.3, "vals" : [73.2, 0.12]} - - do: - index: - index: test - id: "14" - body: { "val1": 0.12, "val2": -82.4, "val3": 2.3, "vals" : [-0.001, 1295.3]} - - do: - index: - index: test - id: "15" - body: { "val1": 98.2, "val2": 32.4, "val3": 2.3, "vals" : [15.5, 16.5]} - - - do: - indices.refresh: - index: [test, unmapped] - - - do: - cluster.health: - wait_for_status: yellow - ---- -"Unmapped": - - - do: - search: - rest_total_hits_as_int: true - index: unmapped - body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "vals"]} } } } - - - match: {hits.total: 0} - - match: {aggregations.mfs.doc_count: 0} - ---- -"Multi value field Max": - - - do: - search: - rest_total_hits_as_int: true - index: test - body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "vals"], "mode" : "max"} } } } - - - match: {hits.total: 15} - - match: {aggregations.mfs.doc_count: 14} - - match: {aggregations.mfs.fields.0.count: 14} - - match: {aggregations.mfs.fields.0.correlation.val1: 0.06838646533369998} - ---- -"Multi value field Min": - - - do: - search: - rest_total_hits_as_int: true - index: test - body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "vals"], "mode" : "min"} } } } - - - match: {hits.total: 15} - - match: {aggregations.mfs.doc_count: 14} - - match: {aggregations.mfs.fields.0.count: 14} - - match: {aggregations.mfs.fields.0.correlation.val1: -0.09777682707831963} - ---- -"Partially unmapped": - - - do: - search: - rest_total_hits_as_int: true - index: [test, unmapped] - body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "vals"]} } } } - - - match: {hits.total: 15} - - match: {aggregations.mfs.doc_count: 13} - - match: {aggregations.mfs.fields.0.count: 13} - - match: {aggregations.mfs.fields.0.correlation.val1: -0.044997535185684244} - ---- -"Partially unmapped with missing defaults": - - - do: - search: - rest_total_hits_as_int: true - index: [test, unmapped] - body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "vals"], "missing" : {"val2" : 10, "vals" : 5 } } } } } - - - match: {hits.total: 15} - - match: {aggregations.mfs.doc_count: 15} - - match: {aggregations.mfs.fields.0.count: 15} - - match: {aggregations.mfs.fields.0.correlation.val2: 0.04028024709708195} - ---- -"With script": - - - do: - catch: /parsing_exception/ - search: - rest_total_hits_as_int: true - index: test - body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["vals", "val3"], "script" : { "my_script" : {"source" : "1 + doc['val1'].value", "lang" : "js"} } } } } } - ---- -"With script params": - - - do: - catch: /parsing_exception/ - search: - rest_total_hits_as_int: true - index: test - body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val3", "vals"], "script" : { "my_script" : {"source" : "my_var + doc['val1'].value", "params" : { "my_var" : 1 }, "lang" : "js" } } } } } } diff --git a/modules/analysis-common/build.gradle b/modules/analysis-common/build.gradle index d1345695d8a1..9988d70b6365 100644 --- a/modules/analysis-common/build.gradle +++ b/modules/analysis-common/build.gradle @@ -5,8 +5,8 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index 3ccc257fc33b..a047f2ee90ad 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -102,7 +102,7 @@ import org.elasticsearch.Version; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.logging.DeprecationCategory; @@ -166,7 +166,7 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationDeciders allocationDeciders + AllocationService allocationService ) { this.scriptServiceHolder.set(scriptService); return Collections.emptyList(); diff --git a/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APM.java b/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APM.java index 9f10612e116c..05926db14d97 100644 --- a/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APM.java +++ b/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APM.java @@ -11,7 +11,7 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Setting; @@ -81,7 +81,7 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer unused, - AllocationDeciders allocationDeciders + AllocationService allocationService ) { final APMTracer apmTracer = tracer.get(); @@ -101,6 +101,7 @@ public List> getSettings() { APMAgentSettings.APM_ENABLED_SETTING, APMAgentSettings.APM_TRACING_NAMES_INCLUDE_SETTING, APMAgentSettings.APM_TRACING_NAMES_EXCLUDE_SETTING, + APMAgentSettings.APM_TRACING_SANITIZE_FIELD_NAMES, APMAgentSettings.APM_AGENT_SETTINGS, APMAgentSettings.APM_SECRET_TOKEN_SETTING, APMAgentSettings.APM_API_KEY_SETTING diff --git a/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMAgentSettings.java b/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMAgentSettings.java index 140d48027a59..1b432c9d3ad3 100644 --- a/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMAgentSettings.java +++ b/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMAgentSettings.java @@ -57,6 +57,7 @@ void addClusterSettingsListeners(ClusterService clusterService, APMTracer apmTra }); 
clusterSettings.addSettingsUpdateConsumer(APM_TRACING_NAMES_INCLUDE_SETTING, apmTracer::setIncludeNames); clusterSettings.addSettingsUpdateConsumer(APM_TRACING_NAMES_EXCLUDE_SETTING, apmTracer::setExcludeNames); + clusterSettings.addSettingsUpdateConsumer(APM_TRACING_SANITIZE_FIELD_NAMES, apmTracer::setLabelFilters); clusterSettings.addAffixMapUpdateConsumer(APM_AGENT_SETTINGS, map -> map.forEach(this::setAgentSetting), (x, y) -> {}); } @@ -143,6 +144,27 @@ void setAgentSetting(String key, String value) { NodeScope ); + static final Setting> APM_TRACING_SANITIZE_FIELD_NAMES = Setting.listSetting( + APM_SETTING_PREFIX + "sanitize_field_names", + List.of( + "password", + "passwd", + "pwd", + "secret", + "*key", + "*token*", + "*session*", + "*credit*", + "*card*", + "*auth*", + "*principal*", + "set-cookie" + ), + Function.identity(), + OperatorDynamic, + NodeScope + ); + static final Setting APM_ENABLED_SETTING = Setting.boolSetting( APM_SETTING_PREFIX + "enabled", false, diff --git a/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMTracer.java b/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMTracer.java index fad42b2768c3..5e8375e067fe 100644 --- a/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMTracer.java +++ b/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMTracer.java @@ -44,6 +44,7 @@ import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_ENABLED_SETTING; import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_TRACING_NAMES_EXCLUDE_SETTING; import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_TRACING_NAMES_INCLUDE_SETTING; +import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_TRACING_SANITIZE_FIELD_NAMES; /** * This is an implementation of the {@link org.elasticsearch.tracing.Tracer} interface, which uses @@ -65,8 +66,10 @@ public class APMTracer extends AbstractLifecycleComponent implements org.elastic private List includeNames; private List excludeNames; + private List labelFilters; /** Built using {@link #includeNames} and {@link #excludeNames}, and filters out spans based on their name. 
*/ private volatile CharacterRunAutomaton filterAutomaton; + private volatile CharacterRunAutomaton labelFilterAutomaton; private String clusterName; private String nodeName; @@ -86,7 +89,10 @@ record APMServices(Tracer tracer, OpenTelemetry openTelemetry) {} public APMTracer(Settings settings) { this.includeNames = APM_TRACING_NAMES_INCLUDE_SETTING.get(settings); this.excludeNames = APM_TRACING_NAMES_EXCLUDE_SETTING.get(settings); + this.labelFilters = APM_TRACING_SANITIZE_FIELD_NAMES.get(settings); + this.filterAutomaton = buildAutomaton(includeNames, excludeNames); + this.labelFilterAutomaton = buildAutomaton(labelFilters, List.of()); this.enabled = APM_ENABLED_SETTING.get(settings); } @@ -109,6 +115,16 @@ void setExcludeNames(List excludeNames) { this.filterAutomaton = buildAutomaton(includeNames, excludeNames); } + void setLabelFilters(List labelFilters) { + this.labelFilters = labelFilters; + this.labelFilterAutomaton = buildAutomaton(labelFilters, List.of()); + } + + // package-private for testing + CharacterRunAutomaton getLabelFilterAutomaton() { + return labelFilterAutomaton; + } + @Override protected void doStart() { if (enabled) { @@ -271,6 +287,12 @@ private void setSpanAttributes(@Nullable Map spanAttributes, Spa for (Map.Entry entry : spanAttributes.entrySet()) { final String key = entry.getKey(); final Object value = entry.getValue(); + + if (this.labelFilterAutomaton.run(key)) { + spanBuilder.setAttribute(key, "[REDACTED]"); + continue; + } + if (value instanceof String) { spanBuilder.setAttribute(key, (String) value); } else if (value instanceof Long) { @@ -394,9 +416,9 @@ Map getSpans() { return spans; } - private static CharacterRunAutomaton buildAutomaton(List includeNames, List excludeNames) { - Automaton includeAutomaton = patternsToAutomaton(includeNames); - Automaton excludeAutomaton = patternsToAutomaton(excludeNames); + private static CharacterRunAutomaton buildAutomaton(List includePatterns, List excludePatterns) { + Automaton includeAutomaton = patternsToAutomaton(includePatterns); + Automaton excludeAutomaton = patternsToAutomaton(excludePatterns); if (includeAutomaton == null) { includeAutomaton = Automata.makeAnyString(); diff --git a/modules/apm/src/test/java/org/elasticsearch/tracing/apm/APMTracerTests.java b/modules/apm/src/test/java/org/elasticsearch/tracing/apm/APMTracerTests.java index f4ab36d43aa4..9c6582fcc118 100644 --- a/modules/apm/src/test/java/org/elasticsearch/tracing/apm/APMTracerTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/tracing/apm/APMTracerTests.java @@ -8,12 +8,14 @@ package org.elasticsearch.tracing.apm; +import org.apache.lucene.util.automaton.CharacterRunAutomaton; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import java.util.List; +import java.util.stream.Stream; import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_ENABLED_SETTING; import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_TRACING_NAMES_EXCLUDE_SETTING; @@ -166,6 +168,43 @@ public void test_whenTraceStarted_andSpanNameExcluded_thenSpanIsNotStarted() { assertThat(apmTracer.getSpans(), hasKey("id3")); } + /** + * Check that sensitive attributes are not added verbatim to a span, but instead the value is redacted. 
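(A minimal standalone sketch of the redaction idea described above, assuming nothing beyond what the diff shows: the new sanitize_field_names wildcard patterns are compiled once, and any span attribute whose key matches is stored as "[REDACTED]" instead of its real value. The sketch uses plain java.util.regex rather than the Lucene CharacterRunAutomaton the tracer actually builds; the class name, pattern list, and attribute keys are illustrative assumptions, not part of this change set.)

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

final class LabelRedactionSketch {

    // Turn "*foo*"-style wildcard patterns into a single regex alternation;
    // a full match on an attribute key marks that key as sensitive.
    static Pattern compile(List<String> wildcardPatterns) {
        String alternation = wildcardPatterns.stream()
            .map(p -> p.chars()
                .mapToObj(c -> c == '*' ? ".*" : Pattern.quote(String.valueOf((char) c)))
                .collect(Collectors.joining()))
            .collect(Collectors.joining("|"));
        return Pattern.compile(alternation);
    }

    // Copy the attributes, replacing values of sensitive keys with a placeholder.
    static Map<String, Object> redact(Map<String, Object> attributes, Pattern sensitiveKeys) {
        Map<String, Object> safe = new LinkedHashMap<>();
        attributes.forEach((key, value) -> safe.put(key, sensitiveKeys.matcher(key).matches() ? "[REDACTED]" : value));
        return safe;
    }

    public static void main(String[] args) {
        Pattern sensitive = compile(List.of("password", "*key", "*token*", "*session*"));
        Map<String, Object> redacted = redact(
            Map.of("es.node_name", "node-0", "secure-key", "hunter2", "my_session_id", "abc"),
            sensitive
        );
        System.out.println(redacted); // secure-key and my_session_id are reported as [REDACTED]
    }
}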
+ */ + public void test_whenAddingAttributes_thenSensitiveValuesAreRedacted() { + Settings settings = Settings.builder().put(APM_ENABLED_SETTING.getKey(), false).build(); + APMTracer apmTracer = buildTracer(settings); + CharacterRunAutomaton labelFilterAutomaton = apmTracer.getLabelFilterAutomaton(); + + Stream.of( + "auth", + "auth-header", + "authValue", + "card", + "card-details", + "card-number", + "credit", + "credit-card", + "key", + "my-credit-number", + "my_session_id", + "passwd", + "password", + "principal", + "principal-value", + "pwd", + "secret", + "secure-key", + "sensitive-token*", + "session", + "session_id", + "set-cookie", + "some-auth", + "some-principal", + "token-for login" + ).forEach(key -> assertTrue("Expected label filter automaton to redact [" + key + "]", labelFilterAutomaton.run(key))); + } + private APMTracer buildTracer(Settings settings) { APMTracer tracer = new APMTracer(settings); tracer.doStart(); diff --git a/modules/data-streams/build.gradle b/modules/data-streams/build.gradle index 49136712b855..fa3346e26ab2 100644 --- a/modules/data-streams/build.gradle +++ b/modules/data-streams/build.gradle @@ -2,9 +2,9 @@ import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.test-with-dependencies' apply plugin: 'elasticsearch.internal-cluster-test' -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.internal-java-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-java-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' esplugin { description 'Elasticsearch Expanded Pack Plugin - Data Streams' diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java index de30aeaa641e..dc073efd925e 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java @@ -100,9 +100,11 @@ import java.util.Collection; import java.util.Collections; import java.util.Comparator; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.OptionalLong; import java.util.Set; @@ -746,7 +748,14 @@ public void testDataSteamAliasWithFilter() throws Exception { equalTo( Map.of( "logs-foobar", - List.of(new DataStreamAlias("foo", List.of("logs-foobar"), null, Map.of("term", Map.of("type", Map.of("value", "y"))))) + List.of( + new DataStreamAlias( + "foo", + List.of("logs-foobar"), + null, + Map.of("logs-foobar", Map.of("term", Map.of("type", Map.of("value", "y")))) + ) + ) ) ) ); @@ -771,7 +780,14 @@ public void testDataSteamAliasWithFilter() throws Exception { equalTo( Map.of( "logs-foobar", - List.of(new DataStreamAlias("foo", List.of("logs-foobar"), null, Map.of("term", Map.of("type", Map.of("value", "x"))))) + List.of( + new DataStreamAlias( + "foo", + List.of("logs-foobar"), + null, + Map.of("logs-foobar", Map.of("term", Map.of("type", Map.of("value", "x")))) + ) + ) ) ) ); @@ -790,17 +806,17 @@ public void testRandomDataSteamAliasesUpdate() throws Exception { String alias = randomAlphaOfLength(4); String[] dataStreams = Arrays.stream(generateRandomStringArray(16, 4, false, false)) .map(s -> "log-" + 
s.toLowerCase(Locale.ROOT)) + .distinct() .toArray(String[]::new); for (String dataStream : dataStreams) { CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStream); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); } - AliasActions addAction = new AliasActions(AliasActions.Type.ADD).aliases(alias) - .indices(dataStreams) - .filter(Map.of("term", Map.of("type", Map.of("value", "y")))); + Map indexFilters = Map.of("term", Map.of("type", Map.of("value", "y"))); + AliasActions addAction = new AliasActions(AliasActions.Type.ADD).aliases(alias).indices(dataStreams).filter(indexFilters); assertAcked(client().admin().indices().aliases(new IndicesAliasesRequest().addAliasAction(addAction)).actionGet()); - addAction = new AliasActions(AliasActions.Type.ADD).aliases(alias).indices(dataStreams[0]).writeIndex(true); + addAction = new AliasActions(AliasActions.Type.ADD).aliases(alias).indices(dataStreams[0]).filter(indexFilters).writeIndex(true); assertAcked(client().admin().indices().aliases(new IndicesAliasesRequest().addAliasAction(addAction)).actionGet()); GetAliasesResponse response = client().admin().indices().getAliases(new GetAliasesRequest()).actionGet(); @@ -815,7 +831,16 @@ public void testRandomDataSteamAliasesUpdate() throws Exception { assertThat(result.get(0).getName(), equalTo(alias)); assertThat(result.get(0).getDataStreams(), containsInAnyOrder(dataStreams)); assertThat(result.get(0).getWriteDataStream(), equalTo(dataStreams[0])); - assertThat(result.get(0).getFilter().string(), equalTo("{\"term\":{\"type\":{\"value\":\"y\"}}}")); + for (String dataStream : dataStreams) { + assertThat( + result.stream() + .map(resultAlias -> resultAlias.getFilter(dataStream)) + .filter(Objects::nonNull) + .map(CompressedXContent::string) + .collect(Collectors.toSet()), + containsInAnyOrder("{\"term\":{\"type\":{\"value\":\"y\"}}}") + ); + } } public void testDataSteamAliasWithMalformedFilter() throws Exception { @@ -2024,24 +2049,7 @@ public void testWriteIndexWriteLoadAndAvgShardSizeIsStoredAfterRollover() throws final var request = new CreateDataStreamAction.Request(dataStreamName); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); - assertBusy(() -> { - for (int i = 0; i < 10; i++) { - indexDocs(dataStreamName, randomIntBetween(100, 200)); - } - - final ClusterState clusterState = internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state(); - final DataStream dataStream = clusterState.getMetadata().dataStreams().get(dataStreamName); - final String writeIndex = dataStream.getWriteIndex().getName(); - final IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats(writeIndex).get(); - for (IndexShardStats indexShardStats : indicesStatsResponse.getIndex(writeIndex).getIndexShards().values()) { - for (ShardStats shard : indexShardStats.getShards()) { - final IndexingStats.Stats shardIndexingStats = shard.getStats().getIndexing().getTotal(); - // Ensure that we have enough clock granularity before rolling over to ensure that we capture _some_ write load - assertThat(shardIndexingStats.getTotalActiveTimeInMillis(), is(greaterThan(0L))); - assertThat(shardIndexingStats.getWriteLoad(), is(greaterThan(0.0))); - } - } - }); + indexDocsAndEnsureThereIsCapturedWriteLoad(dataStreamName); assertAcked(client().admin().indices().rolloverIndex(new RolloverRequest(dataStreamName, null)).actionGet()); final ClusterState clusterState = 
internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state(); @@ -2078,6 +2086,7 @@ public void testWriteLoadAndAvgShardSizeIsStoredInABestEffort() throws Exception // - We want to simulate two possible cases here: // - All the assigned nodes for shard 0 will fail to respond to the IndicesStatsRequest // - Only the shard 1 replica will respond successfully to the IndicesStatsRequest ensuring that we fall back in that case + // (only if it's not co-located with some other shard copies) final List dataOnlyNodes = internalCluster().startDataOnlyNodes(4); final String dataStreamName = "logs-es"; @@ -2090,21 +2099,22 @@ public void testWriteLoadAndAvgShardSizeIsStoredInABestEffort() throws Exception DataStreamIT.putComposableIndexTemplate("my-template", null, List.of("logs-*"), indexSettings, null); final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); + ensureGreen(dataStreamName); - for (int i = 0; i < 10; i++) { - indexDocs(dataStreamName, randomIntBetween(100, 200)); - } + indexDocsAndEnsureThereIsCapturedWriteLoad(dataStreamName); final ClusterState clusterStateBeforeRollover = internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state(); final DataStream dataStreamBeforeRollover = clusterStateBeforeRollover.getMetadata().dataStreams().get(dataStreamName); final IndexRoutingTable currentDataStreamWriteIndexRoutingTable = clusterStateBeforeRollover.routingTable() .index(dataStreamBeforeRollover.getWriteIndex()); - final List failingIndicesStatsNodeIds = new ArrayList<>(); + final Set failingIndicesStatsNodeIds = new HashSet<>(); for (ShardRouting shardRouting : currentDataStreamWriteIndexRoutingTable.shard(0).assignedShards()) { failingIndicesStatsNodeIds.add(shardRouting.currentNodeId()); } failingIndicesStatsNodeIds.add(currentDataStreamWriteIndexRoutingTable.shard(1).primaryShard().currentNodeId()); + final String shard1ReplicaNodeId = currentDataStreamWriteIndexRoutingTable.shard(1).replicaShards().get(0).currentNodeId(); + final boolean shard1ReplicaIsAllocatedInAReachableNode = failingIndicesStatsNodeIds.contains(shard1ReplicaNodeId) == false; for (String nodeId : failingIndicesStatsNodeIds) { String nodeName = clusterStateBeforeRollover.nodes().resolveNode(nodeId).getName(); @@ -2114,7 +2124,12 @@ public void testWriteLoadAndAvgShardSizeIsStoredInABestEffort() throws Exception (handler, request, channel, task) -> channel.sendResponse(new RuntimeException("Unable to get stats")) ); } - assertThat(failingIndicesStatsNodeIds.size(), is(equalTo(3))); + + logger.info( + "--> Node IDs failing to respond to stats requests {}, shard 1 replica routing {}", + failingIndicesStatsNodeIds, + currentDataStreamWriteIndexRoutingTable.shard(1).replicaShards().get(0) + ); assertAcked(client().admin().indices().rolloverIndex(new RolloverRequest(dataStreamName, null)).actionGet()); final ClusterState clusterState = internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state(); @@ -2124,8 +2139,9 @@ public void testWriteLoadAndAvgShardSizeIsStoredInABestEffort() throws Exception final IndexMetadata indexMetadata = clusterState.metadata().index(index); final IndexMetadataStats metadataStats = indexMetadata.getStats(); - if (index.equals(dataStream.getWriteIndex()) == false) { - assertThat(metadataStats, is(notNullValue())); + // If all the shards are co-located within the failing nodes, no stats will be stored during rollover 
+ if (index.equals(dataStream.getWriteIndex()) == false && shard1ReplicaIsAllocatedInAReachableNode) { + assertThat("Expected stats for index " + index, metadataStats, is(notNullValue())); final IndexWriteLoad indexWriteLoad = metadataStats.writeLoad(); // All stats request performed against nodes holding the shard 0 failed @@ -2247,6 +2263,27 @@ public void testShardSizeIsForecastedDuringRollover() throws Exception { assertThat(forecastedShardSizeInBytes.getAsLong(), is(equalTo(expectedTotalSizeInBytes / shardCount))); } + private void indexDocsAndEnsureThereIsCapturedWriteLoad(String dataStreamName) throws Exception { + assertBusy(() -> { + for (int i = 0; i < 10; i++) { + indexDocs(dataStreamName, randomIntBetween(100, 200)); + } + + final ClusterState clusterState = internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state(); + final DataStream dataStream = clusterState.getMetadata().dataStreams().get(dataStreamName); + final String writeIndex = dataStream.getWriteIndex().getName(); + final IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats(writeIndex).get(); + for (IndexShardStats indexShardStats : indicesStatsResponse.getIndex(writeIndex).getIndexShards().values()) { + for (ShardStats shard : indexShardStats.getShards()) { + final IndexingStats.Stats shardIndexingStats = shard.getStats().getIndexing().getTotal(); + // Ensure that we have enough clock granularity before rolling over to ensure that we capture _some_ write load + assertThat(shardIndexingStats.getTotalActiveTimeInMillis(), is(greaterThan(0L))); + assertThat(shardIndexingStats.getWriteLoad(), is(greaterThan(0.0))); + } + } + }); + } + static void putComposableIndexTemplate( String id, @Nullable String mappings, diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java index bf7d9ecc5fc4..094a3f08f492 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java @@ -180,7 +180,7 @@ public void testSnapshotAndRestore() throws Exception { assertThat(getAliasesResponse.getDataStreamAliases().get("ds").size(), equalTo(1)); assertThat(getAliasesResponse.getDataStreamAliases().get("ds").get(0).getName(), equalTo("my-alias")); assertThat( - getAliasesResponse.getDataStreamAliases().get("ds").get(0).getFilter().string(), + getAliasesResponse.getDataStreamAliases().get("ds").get(0).getFilter("ds").string(), equalTo("{\"match_all\":{\"boost\":1.0}}") ); assertThat(getAliasesResponse.getDataStreamAliases().get("ds").get(0).getWriteDataStream(), equalTo("other-ds")); @@ -188,7 +188,7 @@ public void testSnapshotAndRestore() throws Exception { assertThat(getAliasesResponse.getDataStreamAliases().get("other-ds").get(0).getName(), equalTo("my-alias")); assertThat(getAliasesResponse.getDataStreamAliases().get("other-ds").get(0).getWriteDataStream(), equalTo("other-ds")); assertThat( - getAliasesResponse.getDataStreamAliases().get("other-ds").get(0).getFilter().string(), + getAliasesResponse.getDataStreamAliases().get("other-ds").get(0).getFilter("ds").string(), equalTo("{\"match_all\":{\"boost\":1.0}}") ); } @@ -371,7 +371,7 @@ public void testSnapshotAndRestoreAllIncludeSpecificDataStream() throws Exceptio "my-alias", List.of(dataStreamToSnapshot), 
"other-ds".equals(dataStreamToSnapshot) ? "other-ds" : null, - Map.of("match_all", Map.of("boost", 1f)) + Map.of("other-ds", Map.of("match_all", Map.of("boost", 1f)), "ds", Map.of("match_all", Map.of("boost", 1f))) ) ) ) @@ -419,7 +419,7 @@ public void testSnapshotAndRestoreReplaceAll() throws Exception { assertThat(getAliasesResponse.getDataStreamAliases().get("ds").get(0).getDataStreams(), containsInAnyOrder("ds", "other-ds")); assertThat(getAliasesResponse.getDataStreamAliases().get("ds").get(0).getWriteDataStream(), equalTo("other-ds")); assertThat( - getAliasesResponse.getDataStreamAliases().get("ds").get(0).getFilter().string(), + getAliasesResponse.getDataStreamAliases().get("ds").get(0).getFilter("ds").string(), equalTo("{\"match_all\":{\"boost\":1.0}}") ); assertThat(getAliasesResponse.getDataStreamAliases().get("other-ds").size(), equalTo(1)); @@ -427,7 +427,7 @@ public void testSnapshotAndRestoreReplaceAll() throws Exception { assertThat(getAliasesResponse.getDataStreamAliases().get("other-ds").get(0).getDataStreams(), containsInAnyOrder("ds", "other-ds")); assertThat(getAliasesResponse.getDataStreamAliases().get("other-ds").get(0).getWriteDataStream(), equalTo("other-ds")); assertThat( - getAliasesResponse.getDataStreamAliases().get("other-ds").get(0).getFilter().string(), + getAliasesResponse.getDataStreamAliases().get("other-ds").get(0).getFilter("ds").string(), equalTo("{\"match_all\":{\"boost\":1.0}}") ); @@ -473,7 +473,7 @@ public void testSnapshotAndRestoreAll() throws Exception { assertThat(getAliasesResponse.getDataStreamAliases().get("ds").get(0).getWriteDataStream(), equalTo("other-ds")); assertThat(getAliasesResponse.getDataStreamAliases().get("ds").get(0).getDataStreams(), containsInAnyOrder("ds", "other-ds")); assertThat( - getAliasesResponse.getDataStreamAliases().get("ds").get(0).getFilter().string(), + getAliasesResponse.getDataStreamAliases().get("ds").get(0).getFilter("ds").string(), equalTo("{\"match_all\":{\"boost\":1.0}}") ); @@ -482,7 +482,7 @@ public void testSnapshotAndRestoreAll() throws Exception { assertThat(getAliasesResponse.getDataStreamAliases().get("other-ds").get(0).getWriteDataStream(), equalTo("other-ds")); assertThat(getAliasesResponse.getDataStreamAliases().get("other-ds").get(0).getDataStreams(), containsInAnyOrder("ds", "other-ds")); assertThat( - getAliasesResponse.getDataStreamAliases().get("other-ds").get(0).getFilter().string(), + getAliasesResponse.getDataStreamAliases().get("other-ds").get(0).getFilter("ds").string(), equalTo("{\"match_all\":{\"boost\":1.0}}") ); @@ -567,21 +567,21 @@ public void testRename() throws Exception { assertThat(getAliasesResponse.getDataStreamAliases().get("ds2").size(), equalTo(1)); assertThat(getAliasesResponse.getDataStreamAliases().get("ds2").get(0).getName(), equalTo("my-alias")); assertThat( - getAliasesResponse.getDataStreamAliases().get("ds2").get(0).getFilter().string(), + getAliasesResponse.getDataStreamAliases().get("ds2").get(0).getFilter("ds").string(), equalTo("{\"match_all\":{\"boost\":1.0}}") ); assertThat(getAliasesResponse.getDataStreamAliases().get("ds").size(), equalTo(1)); assertThat(getAliasesResponse.getDataStreamAliases().get("ds").get(0).getName(), equalTo("my-alias")); assertThat(getAliasesResponse.getDataStreamAliases().get("ds").get(0).getWriteDataStream(), equalTo("other-ds")); assertThat( - getAliasesResponse.getDataStreamAliases().get("ds").get(0).getFilter().string(), + getAliasesResponse.getDataStreamAliases().get("ds").get(0).getFilter("ds").string(), 
equalTo("{\"match_all\":{\"boost\":1.0}}") ); assertThat(getAliasesResponse.getDataStreamAliases().get("other-ds").size(), equalTo(1)); assertThat(getAliasesResponse.getDataStreamAliases().get("other-ds").get(0).getName(), equalTo("my-alias")); assertThat(getAliasesResponse.getDataStreamAliases().get("other-ds").get(0).getWriteDataStream(), equalTo("other-ds")); assertThat( - getAliasesResponse.getDataStreamAliases().get("other-ds").get(0).getFilter().string(), + getAliasesResponse.getDataStreamAliases().get("other-ds").get(0).getFilter("ds").string(), equalTo("{\"match_all\":{\"boost\":1.0}}") ); } @@ -621,21 +621,21 @@ public void testRenameWriteDataStream() throws Exception { assertThat(getAliasesResponse.getDataStreamAliases().get("other-ds2").get(0).getName(), equalTo("my-alias")); assertThat(getAliasesResponse.getDataStreamAliases().get("other-ds2").get(0).getWriteDataStream(), equalTo("other-ds2")); assertThat( - getAliasesResponse.getDataStreamAliases().get("other-ds2").get(0).getFilter().string(), + getAliasesResponse.getDataStreamAliases().get("other-ds2").get(0).getFilter("ds").string(), equalTo("{\"match_all\":{\"boost\":1.0}}") ); assertThat(getAliasesResponse.getDataStreamAliases().get("ds").size(), equalTo(1)); assertThat(getAliasesResponse.getDataStreamAliases().get("ds").get(0).getName(), equalTo("my-alias")); assertThat(getAliasesResponse.getDataStreamAliases().get("ds").get(0).getWriteDataStream(), equalTo("other-ds2")); assertThat( - getAliasesResponse.getDataStreamAliases().get("ds").get(0).getFilter().string(), + getAliasesResponse.getDataStreamAliases().get("ds").get(0).getFilter("ds").string(), equalTo("{\"match_all\":{\"boost\":1.0}}") ); assertThat(getAliasesResponse.getDataStreamAliases().get("other-ds").size(), equalTo(1)); assertThat(getAliasesResponse.getDataStreamAliases().get("other-ds").get(0).getName(), equalTo("my-alias")); assertThat(getAliasesResponse.getDataStreamAliases().get("other-ds").get(0).getWriteDataStream(), equalTo("other-ds2")); assertThat( - getAliasesResponse.getDataStreamAliases().get("other-ds").get(0).getFilter().string(), + getAliasesResponse.getDataStreamAliases().get("other-ds").get(0).getFilter("ds").string(), equalTo("{\"match_all\":{\"boost\":1.0}}") ); } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java index 0043df507733..f6af27cfa8f1 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataTierDataStreamIT.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; -import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.routing.allocation.DataTier; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; @@ -48,29 +47,27 @@ public void testDefaultDataStreamAllocateToHot() { PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("template").indexTemplate(template) ).actionGet(); - client().prepareIndex(index).setCreate(true).setId("1").setSource("@timestamp", "2020-09-09").setWaitForActiveShards(0).get(); - Settings idxSettings = client().admin() - .indices() - .prepareGetIndex() - 
.addIndices(index) + var dsIndexName = client().prepareIndex(index) + .setCreate(true) + .setId("1") + .setSource("@timestamp", "2020-09-09") + .setWaitForActiveShards(0) .get() - .getSettings() - .get(DataStream.getDefaultBackingIndexName(index, 1)); + .getIndex(); + var idxSettings = client().admin().indices().prepareGetIndex().addIndices(index).get().getSettings().get(dsIndexName); assertThat(DataTier.TIER_PREFERENCE_SETTING.get(idxSettings), equalTo(DataTier.DATA_HOT)); logger.info("--> waiting for {} to be yellow", index); ensureYellow(index); // Roll over index and ensure the second index also went to the "hot" tier - client().admin().indices().prepareRolloverIndex(index).get(); - idxSettings = client().admin() - .indices() - .prepareGetIndex() - .addIndices(index) - .get() - .getSettings() - .get(DataStream.getDefaultBackingIndexName(index, 2)); + var rolledOverIndexName = client().admin().indices().prepareRolloverIndex(index).get().getNewIndex(); + + // new index name should have the rolled over name + assertNotEquals(dsIndexName, rolledOverIndexName); + + idxSettings = client().admin().indices().prepareGetIndex().addIndices(index).get().getSettings().get(rolledOverIndexName); assertThat(DataTier.TIER_PREFERENCE_SETTING.get(idxSettings), equalTo(DataTier.DATA_HOT)); } diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/TsdbDataStreamRestIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/TsdbDataStreamRestIT.java index 13d0c067b0cb..9ae76394011c 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/TsdbDataStreamRestIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/TsdbDataStreamRestIT.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.time.FormatNames; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; +import org.junit.Before; import java.io.IOException; import java.time.Instant; @@ -35,6 +36,14 @@ public class TsdbDataStreamRestIT extends ESRestTestCase { + private static final String COMPONENT_TEMPLATE = """ + { + "template": { + "settings": {} + } + } + """; + private static final String TEMPLATE = """ { "index_patterns": ["k8s*"], @@ -97,6 +106,7 @@ public class TsdbDataStreamRestIT extends ESRestTestCase { } } }, + "composed_of": ["custom_template"], "data_stream": { } }"""; @@ -190,12 +200,19 @@ public class TsdbDataStreamRestIT extends ESRestTestCase { {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "elephant", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876eb4", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}} """; - public void testTsdbDataStreams() throws Exception { - // Create a template - var putComposableIndexTemplateRequest = new Request("POST", "/_index_template/1"); - putComposableIndexTemplateRequest.setJsonEntity(TEMPLATE); - assertOK(client().performRequest(putComposableIndexTemplateRequest)); + @Before + public void setup() throws IOException { + // Add component template: + var request = new Request("POST", "/_component_template/custom_template"); + request.setJsonEntity(COMPONENT_TEMPLATE); + assertOK(client().performRequest(request)); + // Add composable index template + request = new Request("POST", "/_index_template/1"); + request.setJsonEntity(TEMPLATE); + assertOK(client().performRequest(request)); + } + public void testTsdbDataStreams() throws Exception { var bulkRequest = new Request("POST", "/k8s/_bulk"); 
bulkRequest.setJsonEntity(BULK.replace("$now", formatInstant(Instant.now()))); bulkRequest.addParameter("refresh", "true"); @@ -331,10 +348,6 @@ public void testTsdbDataStreamsNanos() throws Exception { } public void testSimulateTsdbDataStreamTemplate() throws Exception { - var putComposableIndexTemplateRequest = new Request("POST", "/_index_template/1"); - putComposableIndexTemplateRequest.setJsonEntity(TEMPLATE); - assertOK(client().performRequest(putComposableIndexTemplateRequest)); - var simulateIndexTemplateRequest = new Request("POST", "/_index_template/_simulate_index/k8s"); var response = client().performRequest(simulateIndexTemplateRequest); assertOK(response); @@ -353,11 +366,6 @@ public void testSimulateTsdbDataStreamTemplate() throws Exception { } public void testSubsequentRollovers() throws Exception { - // Create a template - var putComposableIndexTemplateRequest = new Request("POST", "/_index_template/1"); - putComposableIndexTemplateRequest.setJsonEntity(TEMPLATE); - assertOK(client().performRequest(putComposableIndexTemplateRequest)); - var createDataStreamRequest = new Request("PUT", "/_data_stream/k8s"); assertOK(client().performRequest(createDataStreamRequest)); @@ -460,16 +468,10 @@ public void testMigrateRegularDataStreamToTsdbDataStream() throws Exception { assertThat(e.getMessage(), containsString("is outside of ranges of currently writable indices")); } - public void testChangeTemplateIndexMode() throws Exception { - // Create a template - { - var putComposableIndexTemplateRequest = new Request("POST", "/_index_template/1"); - putComposableIndexTemplateRequest.setJsonEntity(TEMPLATE); - assertOK(client().performRequest(putComposableIndexTemplateRequest)); - } + public void testDowngradeTsdbDataStreamToRegularDataStream() throws Exception { + var time = Instant.now(); { var indexRequest = new Request("POST", "/k8s/_doc"); - var time = Instant.now(); indexRequest.setJsonEntity(DOC.replace("$time", formatInstant(time))); var response = client().performRequest(indexRequest); assertOK(response); @@ -477,16 +479,91 @@ public void testChangeTemplateIndexMode() throws Exception { { var putComposableIndexTemplateRequest = new Request("POST", "/_index_template/1"); putComposableIndexTemplateRequest.setJsonEntity(NON_TSDB_TEMPLATE); - var e = expectThrows(ResponseException.class, () -> client().performRequest(putComposableIndexTemplateRequest)); - assertThat( - e.getMessage(), - containsString( - "composable template [1] with index patterns [k8s*], priority [null]," - + " index.routing_path [] would cause tsdb data streams [k8s] to no longer match a data stream template" - + " with a time_series index_mode" - ) - ); + client().performRequest(putComposableIndexTemplateRequest); } + { + { + // check prior to rollover + var getDataStreamsRequest = new Request("GET", "/_data_stream"); + var getDataStreamResponse = client().performRequest(getDataStreamsRequest); + assertOK(getDataStreamResponse); + var dataStreams = entityAsMap(getDataStreamResponse); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.name"), equalTo("k8s")); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.indices"), hasSize(1)); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.time_series"), notNullValue()); + } + var rolloverRequest = new Request("POST", "/k8s/_rollover"); + var rolloverResponse = client().performRequest(rolloverRequest); + assertOK(rolloverResponse); + var rolloverResponseBody = entityAsMap(rolloverResponse); + 
assertThat(rolloverResponseBody.get("rolled_over"), is(true)); + { + // Data stream is no longer a tsdb data stream + var getDataStreamsRequest = new Request("GET", "/_data_stream"); + var getDataStreamResponse = client().performRequest(getDataStreamsRequest); + assertOK(getDataStreamResponse); + var dataStreams = entityAsMap(getDataStreamResponse); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.name"), equalTo("k8s")); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.indices"), hasSize(2)); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.time_series"), nullValue()); + } + { + // old index remains a tsdb index + var oldIndex = (String) rolloverResponseBody.get("old_index"); + assertThat(oldIndex, backingIndexEqualTo("k8s", 1)); + var indices = getIndex(oldIndex); + var escapedBackingIndex = oldIndex.replace(".", "\\."); + assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".data_stream"), equalTo("k8s")); + assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.mode"), equalTo("time_series")); + assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.time_series.start_time"), notNullValue()); + assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.time_series.end_time"), notNullValue()); + } + { + // new index is a regular index + var newIndex = (String) rolloverResponseBody.get("new_index"); + assertThat(newIndex, backingIndexEqualTo("k8s", 2)); + var indices = getIndex(newIndex); + var escapedBackingIndex = newIndex.replace(".", "\\."); + assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".data_stream"), equalTo("k8s")); + assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.mode"), nullValue()); + assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.time_series.start_time"), nullValue()); + assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.time_series.end_time"), nullValue()); + } + } + { + // All documents should be ingested into the most recent backing index: + // (since the data stream is no longer a tsdb data stream) + Instant[] timestamps = new Instant[] { + time, + time.plusSeconds(1), + time.plusSeconds(5), + time.minus(30, ChronoUnit.DAYS), + time.plus(30, ChronoUnit.DAYS) }; + for (Instant timestamp : timestamps) { + var indexRequest = new Request("POST", "/k8s/_doc"); + indexRequest.setJsonEntity(DOC.replace("$time", formatInstant(timestamp))); + var response = client().performRequest(indexRequest); + assertOK(response); + var responseBody = entityAsMap(response); + assertThat((String) responseBody.get("_index"), backingIndexEqualTo("k8s", 2)); + } + } + } + + public void testUpdateComponentTemplateDoesNotFailIndexTemplateValidation() throws IOException { + var request = new Request("POST", "/_component_template/custom_template"); + request.setJsonEntity(""" + { + "template": { + "settings": { + "index": { + "number_of_replicas": 1 + } + } + } + } + """); + client().performRequest(request); } private static Map getIndex(String indexName) throws IOException { diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java index 7b932b943257..ee085aab09ec 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java +++ 
b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java @@ -74,7 +74,7 @@ public Settings getAdditionalIndexSettings( if (migrating) { indexMode = IndexMode.TIME_SERIES; } else if (dataStream != null) { - indexMode = dataStream.getIndexMode(); + indexMode = timeSeries ? dataStream.getIndexMode() : null; } else if (timeSeries) { indexMode = IndexMode.TIME_SERIES; } else { diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java index 1a90db516dde..b07ab4c5d5c0 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java @@ -20,7 +20,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.ClusterSettings; @@ -117,7 +117,7 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationDeciders allocationDeciders + AllocationService allocationService ) { var service = new UpdateTimeSeriesRangeService(environment.settings(), threadPool, clusterService); this.service.set(service); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java index a2c9959f62b8..4e51f605fc3c 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java @@ -168,18 +168,11 @@ protected DataStreamsStatsAction.DataStreamShardStats readShardResult(StreamInpu } @Override - protected DataStreamsStatsAction.Response newResponse( - DataStreamsStatsAction.Request request, - int totalShards, - int successfulShards, - int failedShards, - List dataStreamShardStats, - List shardFailures, - ClusterState clusterState - ) { + protected + TransportBroadcastByNodeAction.ResponseFactory + getResponseFactory(DataStreamsStatsAction.Request request, ClusterState clusterState) { Map aggregatedDataStreamsStats = new HashMap<>(); Set allBackingIndices = new HashSet<>(); - long totalStoreSizeBytes = 0L; SortedMap indicesLookup = clusterState.getMetadata().getIndicesLookup(); // Collect the number of backing indices from the cluster state. 
If every shard operation for an index fails, @@ -203,43 +196,75 @@ protected DataStreamsStatsAction.Response newResponse( } } - for (DataStreamsStatsAction.DataStreamShardStats shardStat : dataStreamShardStats) { - String indexName = shardStat.getShardRouting().getIndexName(); - IndexAbstraction indexAbstraction = indicesLookup.get(indexName); - IndexAbstraction.DataStream dataStream = indexAbstraction.getParentDataStream(); - assert dataStream != null; + return new ResponseFactory(indicesLookup, allBackingIndices, aggregatedDataStreamsStats); + } + + private class ResponseFactory + implements + TransportBroadcastByNodeAction.ResponseFactory { - // Aggregate global stats - totalStoreSizeBytes += shardStat.getStoreStats().sizeInBytes(); + private final SortedMap indicesLookup; + private final Set allBackingIndices; + private final Map aggregatedDataStreamsStats; - // Aggregate data stream stats - AggregatedStats stats = aggregatedDataStreamsStats.computeIfAbsent(dataStream.getName(), s -> new AggregatedStats()); - stats.storageBytes += shardStat.getStoreStats().sizeInBytes(); - stats.maxTimestamp = Math.max(stats.maxTimestamp, shardStat.getMaxTimestamp()); + ResponseFactory( + SortedMap indicesLookup, + Set allBackingIndices, + Map aggregatedDataStreamsStats + ) { + this.indicesLookup = indicesLookup; + this.allBackingIndices = allBackingIndices; + this.aggregatedDataStreamsStats = aggregatedDataStreamsStats; } - DataStreamsStatsAction.DataStreamStats[] dataStreamStats = aggregatedDataStreamsStats.entrySet() - .stream() - .map( - entry -> new DataStreamsStatsAction.DataStreamStats( - entry.getKey(), - entry.getValue().backingIndices.size(), - ByteSizeValue.ofBytes(entry.getValue().storageBytes), - entry.getValue().maxTimestamp + @Override + public DataStreamsStatsAction.Response newResponse( + int totalShards, + int successfulShards, + int failedShards, + List dataStreamShardStats, + List shardFailures + ) { + long totalStoreSizeBytes = 0L; + + for (DataStreamsStatsAction.DataStreamShardStats shardStat : dataStreamShardStats) { + String indexName = shardStat.getShardRouting().getIndexName(); + IndexAbstraction indexAbstraction = indicesLookup.get(indexName); + IndexAbstraction.DataStream dataStream = indexAbstraction.getParentDataStream(); + assert dataStream != null; + + // Aggregate global stats + totalStoreSizeBytes += shardStat.getStoreStats().sizeInBytes(); + + // Aggregate data stream stats + AggregatedStats stats = aggregatedDataStreamsStats.computeIfAbsent(dataStream.getName(), s -> new AggregatedStats()); + stats.storageBytes += shardStat.getStoreStats().sizeInBytes(); + stats.maxTimestamp = Math.max(stats.maxTimestamp, shardStat.getMaxTimestamp()); + } + + DataStreamsStatsAction.DataStreamStats[] dataStreamStats = aggregatedDataStreamsStats.entrySet() + .stream() + .map( + entry -> new DataStreamsStatsAction.DataStreamStats( + entry.getKey(), + entry.getValue().backingIndices.size(), + ByteSizeValue.ofBytes(entry.getValue().storageBytes), + entry.getValue().maxTimestamp + ) ) - ) - .toArray(DataStreamsStatsAction.DataStreamStats[]::new); - - return new DataStreamsStatsAction.Response( - totalShards, - successfulShards, - failedShards, - shardFailures, - aggregatedDataStreamsStats.size(), - allBackingIndices.size(), - ByteSizeValue.ofBytes(totalStoreSizeBytes), - dataStreamStats - ); + .toArray(DataStreamsStatsAction.DataStreamStats[]::new); + + return new DataStreamsStatsAction.Response( + totalShards, + successfulShards, + failedShards, + shardFailures, + 
aggregatedDataStreamsStats.size(), + allBackingIndices.size(), + ByteSizeValue.ofBytes(totalStoreSizeBytes), + dataStreamStats + ); + } } private static class AggregatedStats { diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java index b7aceb6b0b46..b60d5af917cb 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.action.admin.indices.rollover.MetadataRolloverService; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.TestShardRoutingRoleStrategies; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; @@ -248,6 +249,7 @@ public void setup() throws Exception { when(env.sharedDataFile()).thenReturn(null); AllocationService allocationService = mock(AllocationService.class); when(allocationService.reroute(any(ClusterState.class), any(String.class), any())).then(i -> i.getArguments()[0]); + when(allocationService.getShardRoutingRoleStrategy()).thenReturn(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY); ShardLimitValidator shardLimitValidator = new ShardLimitValidator(Settings.EMPTY, clusterService); createIndexService = new MetadataCreateIndexService( Settings.EMPTY, diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java index 31c702cc4f88..6cc32996b493 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexMode; @@ -25,7 +26,10 @@ import java.time.Instant; import java.time.temporal.ChronoUnit; import java.util.List; +import java.util.Map; +import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.createFirstBackingIndex; +import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.newInstance; import static org.elasticsearch.common.settings.Settings.builder; import static org.elasticsearch.datastreams.DataStreamIndexSettingsProvider.FORMATTER; import static org.hamcrest.Matchers.contains; @@ -34,6 +38,8 @@ public class DataStreamIndexSettingsProviderTests extends ESTestCase { + private static final TimeValue DEFAULT_LOOK_AHEAD_TIME = TimeValue.timeValueHours(2); // default + DataStreamIndexSettingsProvider provider; @Before @@ -48,7 +54,6 @@ public void testGetAdditionalIndexSettings() throws Exception { String dataStreamName = "logs-app1"; Instant now = Instant.now().truncatedTo(ChronoUnit.SECONDS); - TimeValue lookAheadTime = TimeValue.timeValueHours(2); // default Settings settings = Settings.EMPTY; String mapping = """ { 
@@ -78,8 +83,8 @@ public void testGetAdditionalIndexSettings() throws Exception { List.of(new CompressedXContent(mapping)) ); assertThat(result.size(), equalTo(3)); - assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(lookAheadTime.getMillis()))); - assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(lookAheadTime.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); assertThat(IndexMetadata.INDEX_ROUTING_PATH.get(result), contains("field3")); } @@ -309,7 +314,7 @@ public void testGetAdditionalIndexSettingsDataStreamAlreadyCreatedTimeSettingsMi assertThat( e.getMessage(), equalTo( - formatted( + Strings.format( "backing index [%s] in tsdb mode doesn't have the [index.time_series.end_time] index setting", DataStream.getDefaultBackingIndexName(dataStreamName, 1, twoHoursAgo.toEpochMilli()) ) @@ -334,6 +339,55 @@ public void testGetAdditionalIndexSettingsNonTsdbTemplate() { assertThat(result.size(), equalTo(0)); } + public void testGetAdditionalIndexSettingsMigrateToTsdb() { + Instant now = Instant.now().truncatedTo(ChronoUnit.SECONDS); + String dataStreamName = "logs-app1"; + IndexMetadata idx = createFirstBackingIndex(dataStreamName).build(); + DataStream existingDataStream = newInstance(dataStreamName, List.of(idx.getIndex())); + Metadata metadata = Metadata.builder().dataStreams(Map.of(dataStreamName, existingDataStream), Map.of()).build(); + + Settings settings = Settings.EMPTY; + Settings result = provider.getAdditionalIndexSettings( + DataStream.getDefaultBackingIndexName(dataStreamName, 2), + dataStreamName, + true, + metadata, + now, + settings, + List.of() + ); + assertThat(result.size(), equalTo(2)); + assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); + } + + public void testGetAdditionalIndexSettingsDowngradeFromTsdb() { + String dataStreamName = "logs-app1"; + Instant twoHoursAgo = Instant.now().minus(4, ChronoUnit.HOURS).truncatedTo(ChronoUnit.MILLIS); + Metadata.Builder mb = Metadata.builder( + DataStreamTestHelper.getClusterStateWithDataStreams( + List.of(Tuple.tuple(dataStreamName, 1)), + List.of(), + twoHoursAgo.toEpochMilli(), + builder().build(), + 1 + ).getMetadata() + ); + Metadata metadata = mb.build(); + + Settings settings = Settings.EMPTY; + Settings result = provider.getAdditionalIndexSettings( + DataStream.getDefaultBackingIndexName(dataStreamName, 2), + dataStreamName, + false, + metadata, + Instant.ofEpochMilli(1L), + settings, + List.of() + ); + assertThat(result.size(), equalTo(0)); + } + public void testGenerateRoutingPathFromDynamicTemplate() throws Exception { Instant now = Instant.now().truncatedTo(ChronoUnit.SECONDS); TimeValue lookAheadTime = TimeValue.timeValueHours(2); // default diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java index aebaf1a6c017..ff3013d6f545 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java +++ 
b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataIndexTemplateServiceTests.java @@ -11,8 +11,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; -import org.elasticsearch.cluster.metadata.DataStreamTestHelper; -import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; import org.elasticsearch.cluster.metadata.Template; @@ -21,7 +19,6 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.Tuple; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettingProviders; import org.elasticsearch.indices.EmptySystemIndices; @@ -30,8 +27,6 @@ import org.elasticsearch.indices.ShardLimitValidator; import org.elasticsearch.test.ESSingleNodeTestCase; -import java.time.Instant; -import java.time.temporal.ChronoUnit; import java.util.Collections; import java.util.List; import java.util.Set; @@ -50,61 +45,6 @@ */ public class MetadataIndexTemplateServiceTests extends ESSingleNodeTestCase { - public void testValidateTsdbDataStreamsReferringTsdbTemplate() throws Exception { - var state = ClusterState.EMPTY_STATE; - final var service = getMetadataIndexTemplateService(); - var template = new ComposableIndexTemplate( - Collections.singletonList("logs-*-*"), - new Template( - builder().put("index.mode", "time_series").put("index.routing_path", "uid").build(), - new CompressedXContent(generateTsdbMapping()), - null - ), - null, - 100L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ); - state = service.addIndexTemplateV2(state, false, "logs", template); - - var now = Instant.now(); - var mBuilder = Metadata.builder(state.getMetadata()); - DataStreamTestHelper.getClusterStateWithDataStream( - mBuilder, - "unreferenced", - List.of(Tuple.tuple(now.minus(2, ChronoUnit.HOURS), now)) - ); - DataStreamTestHelper.getClusterStateWithDataStream( - mBuilder, - "logs-mysql-default", - List.of(Tuple.tuple(now.minus(2, ChronoUnit.HOURS), now)) - ); - var stateWithDS = ClusterState.builder(state).metadata(mBuilder).build(); - - var e = expectThrows(IllegalArgumentException.class, () -> { - ComposableIndexTemplate nonDSTemplate = new ComposableIndexTemplate( - Collections.singletonList("logs-*-*"), - null, - null, - 100L, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate(false, false), - null - ); - service.addIndexTemplateV2(stateWithDS, false, "logs", nonDSTemplate); - }); - - assertThat( - e.getMessage(), - containsString( - "would cause tsdb data streams [logs-mysql-default] to no longer match a data stream template with a time_series index_mode" - ) - ); - } - public void testRequireRoutingPath() throws Exception { final var service = getMetadataIndexTemplateService(); { diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/CreateDataStreamRequestTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/CreateDataStreamRequestTests.java index 5ade2fa4d63b..bff238d7f16c 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/CreateDataStreamRequestTests.java +++ 
b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/CreateDataStreamRequestTests.java @@ -28,6 +28,11 @@ protected Request createTestInstance() { return new Request(randomAlphaOfLength(8)); } + @Override + protected Request mutateInstance(Request instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } + public void testValidateRequest() { CreateDataStreamAction.Request req = new CreateDataStreamAction.Request("my-data-stream"); ActionRequestValidationException e = req.validate(); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DataStreamsStatsResponseTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DataStreamsStatsResponseTests.java index 0c60f667db5a..9874c19a7824 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DataStreamsStatsResponseTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DataStreamsStatsResponseTests.java @@ -30,6 +30,11 @@ protected DataStreamsStatsAction.Response createTestInstance() { return randomStatsResponse(); } + @Override + protected DataStreamsStatsAction.Response mutateInstance(DataStreamsStatsAction.Response instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } + public static DataStreamsStatsAction.Response randomStatsResponse() { int dataStreamCount = randomInt(10); int backingIndicesTotal = 0; diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamRequestTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamRequestTests.java index 583cc44a5598..d1b3bde33152 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamRequestTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamRequestTests.java @@ -28,6 +28,11 @@ protected Request createTestInstance() { return new Request(randomArray(1, 3, String[]::new, () -> randomAlphaOfLength(6))); } + @Override + protected Request mutateInstance(Request instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } + public void testValidateRequest() { DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(new String[] { "my-data-stream" }); ActionRequestValidationException e = req.validate(); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsRequestTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsRequestTests.java index 58e87b07a6b1..5e6a1ee4bc9d 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsRequestTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsRequestTests.java @@ -34,4 +34,9 @@ protected Request createTestInstance() { }); } + @Override + protected Request mutateInstance(Request instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } + } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java index 0e44aa87ae30..37c0d26f9d83 100644 --- 
a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsResponseTests.java @@ -51,4 +51,9 @@ protected Response createTestInstance() { } return new Response(dataStreams); } + + @Override + protected Response mutateInstance(Response instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/DataStreamTimestampFieldMapperTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/DataStreamTimestampFieldMapperTests.java index 863a7dc39310..741122b888c9 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/DataStreamTimestampFieldMapperTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/DataStreamTimestampFieldMapperTests.java @@ -79,7 +79,7 @@ public void testPostParse() throws IOException { })); ParsedDocument doc = docMapper.parse(source(b -> b.field("@timestamp", "2020-12-12"))); - assertThat(doc.rootDoc().getFields("@timestamp").length, equalTo(2)); + assertThat(doc.rootDoc().getFields("@timestamp").length, equalTo(1)); Exception e = expectThrows(MapperException.class, () -> docMapper.parse(source(b -> b.field("@timestamp1", "2020-12-12")))); assertThat(e.getCause().getMessage(), equalTo("data stream timestamp field [@timestamp] is missing")); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/MetadataCreateDataStreamServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/MetadataCreateDataStreamServiceTests.java index 81d7e4766689..9e006034b580 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/MetadataCreateDataStreamServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/mapper/MetadataCreateDataStreamServiceTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; import org.elasticsearch.index.MapperTestUtils; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.MapperService; @@ -36,7 +37,7 @@ public void testValidateTimestampFieldMapping() throws Exception { public void testValidateTimestampFieldMappingNoFieldMapping() { Exception e = expectThrows(IllegalStateException.class, () -> validateTimestampFieldMapping(createMappingLookup("{}"))); assertThat(e.getMessage(), equalTo("[" + DataStreamTimestampFieldMapper.NAME + "] meta field has been disabled")); - String mapping1 = formatted(""" + String mapping1 = Strings.format(""" { "%s": { "enabled": false diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/100_delete_by_query.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/100_delete_by_query.yml index 8b76faf6c44c..7f1ec1f2c685 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/100_delete_by_query.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/100_delete_by_query.yml @@ -65,3 +65,88 @@ indices.delete_data_stream: name: simple-data-stream1 - is_true: acknowledged + +--- +"Delete by query for multiple data streams": + - skip: + 
features: allowed_warnings + version: " - 7.8.99" + reason: "data streams available in 7.9+" + + - do: + allowed_warnings: + - "index template [my-template2] has index patterns [simple-stream*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template2] will take precedence during new index creation" + indices.put_index_template: + name: my-template2 + body: + index_patterns: [simple-stream*] + data_stream: {} + + - do: + indices.create_data_stream: + name: simple-stream1 + - is_true: acknowledged + + - do: + index: + index: simple-stream1 + id: "1" + op_type: create + body: { "number": 4, '@timestamp': '2020-12-12' } + + - do: + index: + index: simple-stream2 + id: "2" + op_type: create + body: { "number": 4, '@timestamp': '2020-12-12' } + + - do: + index: + index: simple-stream2 + id: "3" + op_type: create + body: { "number": 6, '@timestamp': '2020-12-12' } + + - do: + indices.refresh: + index: simple-stream1,simple-stream2 + + # delete any docs with number <= 4 + - do: + delete_by_query: + index: simple-stream* + body: + query: + range: + number: + lte: 4 + + - match: {deleted: 2} + - match: {version_conflicts: 0} + - match: {batches: 1} + - match: {noops: 0} + - match: {failures: []} + - match: {throttled_millis: 0} + - gte: { took: 0 } + + - do: + indices.refresh: + index: simple-stream1,simple-stream2 + + # verify that both documents with number originally <= 4 have been deleted + - do: + search: + index: simple-stream* + body: { query: { range: { number: { lte: 5 } } } } + - length: { hits.hits: 0 } + + - do: + indices.delete_data_stream: + name: simple-stream1 + - is_true: acknowledged + + - do: + indices.delete_data_stream: + name: simple-stream2 + - is_true: acknowledged diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/110_update_by_query.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/110_update_by_query.yml index ef39fe124cfa..ccf7cead1d70 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/110_update_by_query.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/110_update_by_query.yml @@ -104,3 +104,91 @@ indices.delete_data_stream: name: simple-data-stream1 - is_true: acknowledged + +--- +"Update by query for multiple data streams": + - skip: + features: allowed_warnings + version: " - 7.8.99" + reason: "data streams available in 7.9+" + + - do: + allowed_warnings: + - "index template [my-template2] has index patterns [simple-stream*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template2] will take precedence during new index creation" + indices.put_index_template: + name: my-template2 + body: + index_patterns: [simple-stream*] + data_stream: {} + + - do: + indices.create_data_stream: + name: simple-stream1 + - is_true: acknowledged + + - do: + index: + index: simple-stream1 + id: "1" + op_type: create + body: { "number": 4, '@timestamp': '2020-12-12' } + + - do: + index: + index: simple-stream2 + id: "2" + op_type: create + body: { "number": 4, '@timestamp': '2020-12-12' } + + - do: + index: + index: simple-stream2 + id: "3" + op_type: create + body: { "number": 6, '@timestamp': '2020-12-12' } + + - do: + indices.refresh: + index: simple-stream1,simple-stream2 + + # increment by one any docs with number <= 4 + - do: + update_by_query: + index: simple-stream* + body: + script: + source: 
"ctx._source.number++" + lang: "painless" + query: + range: + number: + lte: 4 + + - match: {updated: 2} + - match: {version_conflicts: 0} + - match: {batches: 1} + - match: {noops: 0} + - match: {failures: []} + - match: {throttled_millis: 0} + - gte: { took: 0 } + + - do: + indices.refresh: + index: simple-stream1,simple-stream2 + + # verify that both numbers originally <= 4 have been incremented by one + - do: + search: + index: simple-stream* + body: { query: { range: { number: { lte: 5 } } } } + - length: { hits.hits: 2 } + + - do: + indices.delete_data_stream: + name: simple-stream1 + - is_true: acknowledged + + - do: + indices.delete_data_stream: + name: simple-stream2 + - is_true: acknowledged diff --git a/modules/ingest-attachment/build.gradle b/modules/ingest-attachment/build.gradle index 64d74ef8b356..36dfd027cd81 100644 --- a/modules/ingest-attachment/build.gradle +++ b/modules/ingest-attachment/build.gradle @@ -7,8 +7,8 @@ import org.elasticsearch.gradle.internal.info.BuildParams * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' esplugin { description 'Ingest processor that uses Apache Tika to extract contents' @@ -16,8 +16,8 @@ esplugin { } versions << [ - 'tika' : '2.4.0', - 'pdfbox': '2.0.26', + 'tika' : '2.6.0', + 'pdfbox': '2.0.27', 'poi' : '5.2.2', 'mime4j': '0.8.5' ] @@ -49,7 +49,7 @@ dependencies { // Adobe PDF api "org.apache.pdfbox:pdfbox:${versions.pdfbox}" api "org.apache.pdfbox:fontbox:${versions.pdfbox}" - api "org.apache.pdfbox:jempbox:1.8.16" + api "org.apache.pdfbox:jempbox:1.8.17" api "commons-logging:commons-logging:${versions.commonslogging}" // OpenOffice api "org.apache.poi:poi-ooxml:${versions.poi}" diff --git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle index 958e64d2ac4b..e48156ef98da 100644 --- a/modules/ingest-common/build.gradle +++ b/modules/ingest-common/build.gradle @@ -5,8 +5,8 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { diff --git a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java index 5ac11cd9963c..152ec847a490 100644 --- a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java +++ b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; import org.elasticsearch.ingest.IngestStats; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptEngine; @@ -61,7 +62,7 @@ public void testFailureInConditionalProcessor() { internalCluster().ensureAtLeastNumDataNodes(1); internalCluster().startMasterOnlyNode(); final String pipelineId = "foo"; - client().admin().cluster().preparePutPipeline(pipelineId, new BytesArray(formatted(""" + client().admin().cluster().preparePutPipeline(pipelineId, new BytesArray(Strings.format(""" { "processors": [ { @@ -109,7 +110,7 @@ public void testScriptDisabled() throws Exception { String pipelineIdWithScript = pipelineIdWithoutScript + "_script"; internalCluster().startNode(); - BytesReference pipelineWithScript = new BytesArray(formatted(""" + BytesReference pipelineWithScript = new BytesArray(Strings.format(""" { "processors": [ { "script": { "lang": "%s", "source": "my_script" } } ] }""", MockScriptEngine.NAME)); @@ -179,7 +180,7 @@ public Settings onNodeStopped(String nodeName) { public void testPipelineWithScriptProcessorThatHasStoredScript() throws Exception { internalCluster().startNode(); - client().admin().cluster().preparePutStoredScript().setId("1").setContent(new BytesArray(formatted(""" + client().admin().cluster().preparePutStoredScript().setId("1").setContent(new BytesArray(Strings.format(""" {"script": {"lang": "%s", "source": "my_script"} } """, MockScriptEngine.NAME)), XContentType.JSON).get(); BytesReference pipeline = new BytesArray(""" diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java index 4a9fa93662bd..84c4315a6901 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateFormat.java @@ -36,11 +36,10 @@ enum DateFormat { @Override Function getFunction(String format, ZoneId timezone, Locale locale) { return (date) -> { - TemporalAccessor accessor = DateFormatter.forPattern("iso8601").parse(date); + TemporalAccessor accessor = ISO_8601.parse(date); // even though locale could be set to en-us, Locale.ROOT (following iso8601 calendar data rules) should be used return DateFormatters.from(accessor, Locale.ROOT, timezone).withZoneSameInstant(timezone); }; - } }, Unix { @@ -115,6 +114,14 @@ Function getFunction(String format, ZoneId zoneId, Locale } }; + /** It's important to keep this variable as a constant because {@link 
DateFormatter#forPattern(String)} is an expensive method and, + * in this case, it's a never changing value. + * <p>
+ * Also, we shouldn't inline it in the {@link DateFormat#Iso8601}'s enum because it'd make useless the cache used + * at {@link DateProcessor}). + */ + private static final DateFormatter ISO_8601 = DateFormatter.forPattern("iso8601"); + abstract Function getFunction(String format, ZoneId timezone, Locale locale); static DateFormat fromString(String format) { diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java index 71a73f605a4c..8ef870f77377 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DateProcessor.java @@ -9,8 +9,10 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.util.LocaleUtils; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.core.Nullable; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.ConfigurationUtils; @@ -19,6 +21,7 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.TemplateScript; +import java.lang.ref.SoftReference; import java.time.ZoneId; import java.time.ZoneOffset; import java.time.ZonedDateTime; @@ -26,7 +29,9 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.concurrent.ConcurrentMap; import java.util.function.Function; +import java.util.function.Supplier; public final class DateProcessor extends AbstractProcessor { @@ -72,9 +77,17 @@ public final class DateProcessor extends AbstractProcessor { this.targetField = targetField; this.formats = formats; this.dateParsers = new ArrayList<>(this.formats.size()); + for (String format : formats) { DateFormat dateFormat = DateFormat.fromString(format); - dateParsers.add((params) -> dateFormat.getFunction(format, newDateTimeZone(params), newLocale(params))); + dateParsers.add((params) -> { + var documentZoneId = newDateTimeZone(params); + var documentLocale = newLocale(params); + return Cache.INSTANCE.getOrCompute( + new Cache.Key(format, documentZoneId, documentLocale), + () -> dateFormat.getFunction(format, documentZoneId, documentLocale) + ); + }); } this.outputFormat = outputFormat; formatter = DateFormatter.forPattern(this.outputFormat); @@ -198,4 +211,50 @@ public DateProcessor create( ); } } + + /** + * An ad-hoc cache class that just throws away the cached values once it's full because we don't want to affect the performance + * while applying eviction policies when adding new values or retrieving them. 
+ */ + static final class Cache { + + private static final String CACHE_CAPACITY_SETTING = "es.ingest.date_processor.cache_capacity"; + static final Cache INSTANCE; + + static { + var cacheSizeStr = System.getProperty(CACHE_CAPACITY_SETTING, "256"); + try { + INSTANCE = new Cache(Integer.parseInt(cacheSizeStr)); + } catch (NumberFormatException e) { + throw new SettingsException("{} must be a valid number but was [{}]", CACHE_CAPACITY_SETTING, cacheSizeStr); + } + } + private final ConcurrentMap>> map; + private final int capacity; + + Cache(int capacity) { + if (capacity <= 0) { + throw new IllegalArgumentException("cache capacity must be a value greater than 0 but was " + capacity); + } + this.capacity = capacity; + this.map = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(this.capacity); + } + + Function getOrCompute(Key key, Supplier> supplier) { + Function fn; + var element = map.get(key); + // element exist and wasn't GCed + if (element != null && (fn = element.get()) != null) { + return fn; + } + if (map.size() >= capacity) { + map.clear(); + } + fn = supplier.get(); + map.put(key, new SoftReference<>(fn)); + return fn; + } + + record Key(String format, ZoneId zoneId, Locale locale) {} + } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java index 5dcf944eaa2a..a13b7d21bc11 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.ingest.common; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; @@ -57,7 +57,7 @@ public Request(boolean sorted, String ecsCompatibility) { Request(StreamInput in) throws IOException { super(in); this.sorted = in.readBoolean(); - this.ecsCompatibility = in.getVersion().onOrAfter(Version.V_8_0_0) + this.ecsCompatibility = in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0) ? in.readString() : GrokProcessor.DEFAULT_ECS_COMPATIBILITY_MODE; } @@ -71,7 +71,7 @@ public ActionRequestValidationException validate() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeBoolean(sorted); - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { out.writeString(ecsCompatibility); } } @@ -144,18 +144,15 @@ public TransportAction(TransportService transportService, ActionFilters actionFi @Override protected void doExecute(Task task, Request request, ActionListener listener) { - try { - listener.onResponse( - new Response( - request.getEcsCompatibility().equals(Grok.ECS_COMPATIBILITY_MODES[0]) - ? request.sorted() ? sortedLegacyGrokPatterns : legacyGrokPatterns - : request.sorted() ? sortedEcsV1GrokPatterns - : ecsV1GrokPatterns - ) - ); - } catch (Exception e) { - listener.onFailure(e); - } + ActionListener.completeWith( + listener, + () -> new Response( + request.getEcsCompatibility().equals(Grok.ECS_COMPATIBILITY_MODES[0]) + ? request.sorted() ? sortedLegacyGrokPatterns : legacyGrokPatterns + : request.sorted() ? 
sortedEcsV1GrokPatterns + : ecsV1GrokPatterns + ) + ); } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java index a79954de0f35..7b20cfbf0b39 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.core.Strings; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestDocument; @@ -32,12 +33,14 @@ public final class JsonProcessor extends AbstractProcessor { public static final String TYPE = "json"; + private static final String STRICT_JSON_PARSING_PARAMETER = "strict_json_parsing"; private final String field; private final String targetField; private final boolean addToRoot; private final ConflictStrategy addToRootConflictStrategy; private final boolean allowDuplicateKeys; + private final boolean strictJsonParsing; JsonProcessor( String tag, @@ -47,6 +50,19 @@ public final class JsonProcessor extends AbstractProcessor { boolean addToRoot, ConflictStrategy addToRootConflictStrategy, boolean allowDuplicateKeys + ) { + this(tag, description, field, targetField, addToRoot, addToRootConflictStrategy, allowDuplicateKeys, true); + } + + JsonProcessor( + String tag, + String description, + String field, + String targetField, + boolean addToRoot, + ConflictStrategy addToRootConflictStrategy, + boolean allowDuplicateKeys, + boolean strictJsonParsing ) { super(tag, description); this.field = field; @@ -54,6 +70,7 @@ public final class JsonProcessor extends AbstractProcessor { this.addToRoot = addToRoot; this.addToRootConflictStrategy = addToRootConflictStrategy; this.allowDuplicateKeys = allowDuplicateKeys; + this.strictJsonParsing = strictJsonParsing; } public String getField() { @@ -72,7 +89,7 @@ public ConflictStrategy getAddToRootConflictStrategy() { return addToRootConflictStrategy; } - public static Object apply(Object fieldValue, boolean allowDuplicateKeys) { + public static Object apply(Object fieldValue, boolean allowDuplicateKeys, boolean strictJsonParsing) { BytesReference bytesRef = fieldValue == null ? new BytesArray("null") : new BytesArray(fieldValue.toString()); try ( InputStream stream = bytesRef.streamInput(); @@ -96,14 +113,42 @@ public static Object apply(Object fieldValue, boolean allowDuplicateKeys) { } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { throw new IllegalArgumentException("cannot read binary value"); } + if (strictJsonParsing) { + String errorMessage = Strings.format( + "The input %s is not valid JSON and the %s parameter is true", + fieldValue, + STRICT_JSON_PARSING_PARAMETER + ); + /* + * If strict JSON parsing is disabled, then once we've found the first token then we move on. For example for the string + * "123 \"foo\"" we would just return the first token, 123. However, if strict parsing is enabled (which it is by default), + * then we check to see whether there are any more tokens at this point. We expect the next token to be null. If there is + * another token or if the parser blows up, then we know we had invalid JSON and we alert the user with an + * IllegalArgumentException. 
+ */ + try { + token = parser.nextToken(); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException(errorMessage, e); + } + if (token != null) { + throw new IllegalArgumentException(errorMessage); + } + } return value; } catch (IOException e) { throw new IllegalArgumentException(e); } } - public static void apply(Map ctx, String fieldName, boolean allowDuplicateKeys, ConflictStrategy conflictStrategy) { - Object value = apply(ctx.get(fieldName), allowDuplicateKeys); + public static void apply( + Map ctx, + String fieldName, + boolean allowDuplicateKeys, + ConflictStrategy conflictStrategy, + boolean strictJsonParsing + ) { + Object value = apply(ctx.get(fieldName), allowDuplicateKeys, strictJsonParsing); if (value instanceof Map) { @SuppressWarnings("unchecked") Map map = (Map) value; @@ -140,9 +185,9 @@ public static void recursiveMerge(Map target, Map map, String field) { - JsonProcessor.apply(map, field, false, JsonProcessor.ConflictStrategy.REPLACE); + JsonProcessor.apply(map, field, false, JsonProcessor.ConflictStrategy.REPLACE, true); + } + + /** + * Uses {@link JsonProcessor} to convert a JSON string to a structured JSON + * object. This method is a more lenient version of {@link #json(Map, String)}. For example if given fieldValue + * "{"foo":"bar"} 123", + * this method will return a map with key-vale pair "foo" and "bar" rather than throwing an IllegalArgumentException. + * + * @param map map that contains the JSON string and will receive the + * structured JSON content + * @param field key that identifies the entry in map that + * contains the JSON string + */ + public static void jsonLenient(Map map, String field) { + JsonProcessor.apply(map, field, false, JsonProcessor.ConflictStrategy.REPLACE, false); } /** diff --git a/modules/ingest-common/src/main/resources/org/elasticsearch/ingest/common/processors_whitelist.txt b/modules/ingest-common/src/main/resources/org/elasticsearch/ingest/common/processors_whitelist.txt index 4d05bdfc283b..e1779aade519 100644 --- a/modules/ingest-common/src/main/resources/org/elasticsearch/ingest/common/processors_whitelist.txt +++ b/modules/ingest-common/src/main/resources/org/elasticsearch/ingest/common/processors_whitelist.txt @@ -13,7 +13,9 @@ class org.elasticsearch.ingest.common.Processors { String lowercase(String) String uppercase(String) Object json(Object) + Object jsonLenient(Object) void json(Map, String) + void jsonLenient(Map, String) String urlDecode(String) String communityId(String, String, Object, Object, Object, Object, Object, Object, int) String communityId(String, String, Object, Object, Object, Object, Object, Object) diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorFactoryTestCase.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorFactoryTestCase.java index b0e74000da57..209375ac12ed 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorFactoryTestCase.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorFactoryTestCase.java @@ -15,7 +15,7 @@ import java.util.HashMap; import java.util.Map; -import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; public abstract class AbstractStringProcessorFactoryTestCase extends ESTestCase { diff --git 
a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorFactoryTests.java index 54f11932bfe5..daccbc9559d4 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorFactoryTests.java @@ -15,13 +15,12 @@ import org.elasticsearch.test.ESTestCase; import org.junit.Before; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; -import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; public class AppendProcessorFactoryTests extends ESTestCase { @@ -39,14 +38,14 @@ public void testCreate() throws Exception { if (randomBoolean()) { value = "value1"; } else { - value = Arrays.asList("value1", "value2", "value3"); + value = List.of("value1", "value2", "value3"); } config.put("value", value); String processorTag = randomAlphaOfLength(10); AppendProcessor appendProcessor = factory.create(null, processorTag, null, config); assertThat(appendProcessor.getTag(), equalTo(processorTag)); - assertThat(appendProcessor.getField().newInstance(Collections.emptyMap()).execute(), equalTo("field1")); - assertThat(appendProcessor.getValue().copyAndResolve(Collections.emptyMap()), equalTo(value)); + assertThat(appendProcessor.getField().newInstance(Map.of()).execute(), equalTo("field1")); + assertThat(appendProcessor.getValue().copyAndResolve(Map.of()), equalTo(value)); } public void testCreateNoFieldPresent() throws Exception { @@ -110,7 +109,7 @@ public void testMediaType() throws Exception { // invalid media type expectedMediaType = randomValueOtherThanMany( - m -> Arrays.asList(ConfigurationUtils.VALID_MEDIA_TYPES).contains(m), + m -> List.of(ConfigurationUtils.VALID_MEDIA_TYPES).contains(m), () -> randomAlphaOfLengthBetween(5, 9) ); final Map config2 = new HashMap<>(); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorTests.java index d789f1bd79c4..3b5c2d39a3ca 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AppendProcessorTests.java @@ -27,11 +27,11 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.CoreMatchers.not; -import static org.hamcrest.CoreMatchers.sameInstance; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.sameInstance; public class AppendProcessorTests extends ESTestCase { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorTests.java index 1b35ea427c59..8ca6e71b4a51 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorTests.java +++ 
b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorTests.java @@ -14,8 +14,8 @@ import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.RandomDocumentPicks; -import org.hamcrest.CoreMatchers; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class BytesProcessorTests extends AbstractStringProcessorTestCase { @@ -51,14 +51,8 @@ public void testTooLarge() { String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, "8912pb"); Processor processor = newProcessor(fieldName, randomBoolean(), fieldName); ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> processor.execute(ingestDocument)); - assertThat( - exception.getMessage(), - CoreMatchers.equalTo("failed to parse setting [Ingest Field] with value [8912pb] as a size in bytes") - ); - assertThat( - exception.getCause().getMessage(), - CoreMatchers.containsString("Values greater than 9223372036854775807 bytes are not supported") - ); + assertThat(exception.getMessage(), equalTo("failed to parse setting [Ingest Field] with value [8912pb] as a size in bytes")); + assertThat(exception.getCause().getMessage(), containsString("Values greater than 9223372036854775807 bytes are not supported")); } public void testNotBytes() { @@ -66,7 +60,7 @@ public void testNotBytes() { String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, "junk"); Processor processor = newProcessor(fieldName, randomBoolean(), fieldName); ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> processor.execute(ingestDocument)); - assertThat(exception.getMessage(), CoreMatchers.equalTo("failed to parse setting [Ingest Field] with value [junk]")); + assertThat(exception.getMessage(), equalTo("failed to parse setting [Ingest Field] with value [junk]")); } public void testMissingUnits() { @@ -74,7 +68,7 @@ public void testMissingUnits() { String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, "1"); Processor processor = newProcessor(fieldName, randomBoolean(), fieldName); ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> processor.execute(ingestDocument)); - assertThat(exception.getMessage(), CoreMatchers.containsString("unit is missing or unrecognized")); + assertThat(exception.getMessage(), containsString("unit is missing or unrecognized")); } public void testFractional() throws Exception { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/CommunityIdProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/CommunityIdProcessorFactoryTests.java index 3ded531131f3..8d08040a4058 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/CommunityIdProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/CommunityIdProcessorFactoryTests.java @@ -25,8 +25,8 @@ import static org.elasticsearch.ingest.common.CommunityIdProcessor.Factory.DEFAULT_TARGET; import static org.elasticsearch.ingest.common.CommunityIdProcessor.Factory.DEFAULT_TRANSPORT; import static org.elasticsearch.ingest.common.CommunityIdProcessor.toUint16; -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; public class 
CommunityIdProcessorFactoryTests extends ESTestCase { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorFactoryTests.java index 60ae12dd1aa0..df31f0ece6f6 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorFactoryTests.java @@ -10,12 +10,11 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.test.ESTestCase; -import org.hamcrest.Matchers; import java.util.HashMap; import java.util.Map; -import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -46,7 +45,7 @@ public void testCreateUnsupportedType() throws Exception { factory.create(null, null, null, config); fail("factory create should have failed"); } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), Matchers.equalTo("[type] type [" + type + "] not supported, cannot convert field.")); + assertThat(e.getMessage(), equalTo("[type] type [" + type + "] not supported, cannot convert field.")); assertThat(e.getMetadata("es.processor_type").get(0), equalTo(ConvertProcessor.TYPE)); assertThat(e.getMetadata("es.property_name").get(0), equalTo("type")); assertThat(e.getMetadata("es.processor_tag"), nullValue()); @@ -62,7 +61,7 @@ public void testCreateNoFieldPresent() throws Exception { factory.create(null, null, null, config); fail("factory create should have failed"); } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), Matchers.equalTo("[field] required property is missing")); + assertThat(e.getMessage(), equalTo("[field] required property is missing")); } } @@ -74,7 +73,7 @@ public void testCreateNoTypePresent() throws Exception { factory.create(null, null, null, config); fail("factory create should have failed"); } catch (ElasticsearchParseException e) { - assertThat(e.getMessage(), Matchers.equalTo("[type] required property is missing")); + assertThat(e.getMessage(), equalTo("[type] required property is missing")); } } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java index 0590934ccd36..f1b7433e10a0 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ConvertProcessorTests.java @@ -514,7 +514,7 @@ public void testAutoConvertNotString() throws Exception { } default -> throw new UnsupportedOperationException(); } - IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", randomValue)); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Map.of("field", randomValue)); Processor processor = new ConvertProcessor(randomAlphaOfLength(10), null, "field", "field", Type.AUTO, false); processor.execute(ingestDocument); Object convertedValue = ingestDocument.getFieldValue("field", Object.class); @@ -523,7 +523,7 @@ public void testAutoConvertNotString() throws Exception { public void testAutoConvertStringNotMatched() throws Exception { String value = "notAnIntFloatOrBool"; - IngestDocument 
ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", value)); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Map.of("field", value)); Processor processor = new ConvertProcessor(randomAlphaOfLength(10), null, "field", "field", Type.AUTO, false); processor.execute(ingestDocument); Object convertedValue = ingestDocument.getFieldValue("field", Object.class); @@ -533,10 +533,7 @@ public void testAutoConvertStringNotMatched() throws Exception { public void testAutoConvertMatchBoolean() throws Exception { boolean randomBoolean = randomBoolean(); String booleanString = Boolean.toString(randomBoolean); - IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument( - random(), - Collections.singletonMap("field", booleanString) - ); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Map.of("field", booleanString)); Processor processor = new ConvertProcessor(randomAlphaOfLength(10), null, "field", "field", Type.AUTO, false); processor.execute(ingestDocument); Object convertedValue = ingestDocument.getFieldValue("field", Object.class); @@ -546,7 +543,7 @@ public void testAutoConvertMatchBoolean() throws Exception { public void testAutoConvertMatchInteger() throws Exception { int randomInt = randomInt(); String randomString = Integer.toString(randomInt); - IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", randomString)); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Map.of("field", randomString)); Processor processor = new ConvertProcessor(randomAlphaOfLength(10), null, "field", "field", Type.AUTO, false); processor.execute(ingestDocument); Object convertedValue = ingestDocument.getFieldValue("field", Object.class); @@ -556,7 +553,7 @@ public void testAutoConvertMatchInteger() throws Exception { public void testAutoConvertMatchLong() throws Exception { long randomLong = randomLong(); String randomString = Long.toString(randomLong); - IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", randomString)); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Map.of("field", randomString)); Processor processor = new ConvertProcessor(randomAlphaOfLength(10), null, "field", "field", Type.AUTO, false); processor.execute(ingestDocument); Object convertedValue = ingestDocument.getFieldValue("field", Object.class); @@ -567,7 +564,7 @@ public void testAutoConvertDoubleNotMatched() throws Exception { double randomDouble = randomDouble(); String randomString = Double.toString(randomDouble); float randomFloat = Float.parseFloat(randomString); - IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", randomString)); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Map.of("field", randomString)); Processor processor = new ConvertProcessor(randomAlphaOfLength(10), null, "field", "field", Type.AUTO, false); processor.execute(ingestDocument); Object convertedValue = ingestDocument.getFieldValue("field", Object.class); @@ -578,7 +575,7 @@ public void testAutoConvertDoubleNotMatched() throws Exception { public void testAutoConvertMatchFloat() throws Exception { float randomFloat = randomFloat(); String randomString = Float.toString(randomFloat); - IngestDocument ingestDocument = 
RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("field", randomString)); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Map.of("field", randomString)); Processor processor = new ConvertProcessor(randomAlphaOfLength(10), null, "field", "field", Type.AUTO, false); processor.execute(ingestDocument); Object convertedValue = ingestDocument.getFieldValue("field", Object.class); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/CsvProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/CsvProcessorFactoryTests.java index c4a631d05c35..7053e079a87d 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/CsvProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/CsvProcessorFactoryTests.java @@ -10,10 +10,10 @@ import org.elasticsearch.test.ESTestCase; -import java.util.Collections; import java.util.HashMap; +import java.util.List; +import java.util.Map; -import static java.util.Collections.emptyMap; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -24,7 +24,7 @@ public void testProcessorIsCreated() { CsvProcessor.Factory factory = new CsvProcessor.Factory(); HashMap properties = new HashMap<>(); properties.put("field", "field"); - properties.put("target_fields", Collections.singletonList("target")); + properties.put("target_fields", List.of("target")); properties.put("quote", "|"); properties.put("separator", "/"); properties.put("empty_value", "empty"); @@ -39,6 +39,6 @@ public void testProcessorIsCreated() { assertThat(csv.emptyValue, equalTo("empty")); assertThat(csv.trim, equalTo(true)); assertThat(csv.ignoreMissing, equalTo(true)); - assertThat(properties, is(emptyMap())); + assertThat(properties, is(Map.of())); } } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateFormatTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateFormatTests.java index 2d3b3b9a4a9d..f7c94e24881d 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateFormatTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateFormatTests.java @@ -19,8 +19,8 @@ import java.util.Locale; import java.util.function.Function; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -import static org.hamcrest.core.IsEqual.equalTo; public class DateFormatTests extends ESTestCase { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameFactoryTests.java index 32b181050102..1e247655c875 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameFactoryTests.java @@ -11,14 +11,14 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.TestTemplateService; import org.elasticsearch.test.ESTestCase; -import org.hamcrest.Matchers; import java.time.ZoneOffset; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; + public class DateIndexNameFactoryTests extends ESTestCase { 
public void testDefaults() throws Exception { @@ -28,12 +28,12 @@ public void testDefaults() throws Exception { config.put("date_rounding", "y"); DateIndexNameProcessor processor = factory.create(null, null, null, config); - assertThat(processor.getDateFormats().size(), Matchers.equalTo(1)); - assertThat(processor.getField(), Matchers.equalTo("_field")); - assertThat(processor.getIndexNamePrefixTemplate().newInstance(Collections.emptyMap()).execute(), Matchers.equalTo("")); - assertThat(processor.getDateRoundingTemplate().newInstance(Collections.emptyMap()).execute(), Matchers.equalTo("y")); - assertThat(processor.getIndexNameFormatTemplate().newInstance(Collections.emptyMap()).execute(), Matchers.equalTo("yyyy-MM-dd")); - assertThat(processor.getTimezone(), Matchers.equalTo(ZoneOffset.UTC)); + assertThat(processor.getDateFormats().size(), equalTo(1)); + assertThat(processor.getField(), equalTo("_field")); + assertThat(processor.getIndexNamePrefixTemplate().newInstance(Map.of()).execute(), equalTo("")); + assertThat(processor.getDateRoundingTemplate().newInstance(Map.of()).execute(), equalTo("y")); + assertThat(processor.getIndexNameFormatTemplate().newInstance(Map.of()).execute(), equalTo("yyyy-MM-dd")); + assertThat(processor.getTimezone(), equalTo(ZoneOffset.UTC)); } public void testSpecifyOptionalSettings() throws Exception { @@ -42,10 +42,10 @@ public void testSpecifyOptionalSettings() throws Exception { config.put("field", "_field"); config.put("index_name_prefix", "_prefix"); config.put("date_rounding", "y"); - config.put("date_formats", Arrays.asList("UNIX", "UNIX_MS")); + config.put("date_formats", List.of("UNIX", "UNIX_MS")); DateIndexNameProcessor processor = factory.create(null, null, null, config); - assertThat(processor.getDateFormats().size(), Matchers.equalTo(2)); + assertThat(processor.getDateFormats().size(), equalTo(2)); config = new HashMap<>(); config.put("field", "_field"); @@ -54,7 +54,7 @@ public void testSpecifyOptionalSettings() throws Exception { config.put("index_name_format", "yyyyMMdd"); processor = factory.create(null, null, null, config); - assertThat(processor.getIndexNameFormatTemplate().newInstance(Collections.emptyMap()).execute(), Matchers.equalTo("yyyyMMdd")); + assertThat(processor.getIndexNameFormatTemplate().newInstance(Map.of()).execute(), equalTo("yyyyMMdd")); config = new HashMap<>(); config.put("field", "_field"); @@ -63,7 +63,7 @@ public void testSpecifyOptionalSettings() throws Exception { config.put("timezone", "+02:00"); processor = factory.create(null, null, null, config); - assertThat(processor.getTimezone(), Matchers.equalTo(ZoneOffset.ofHours(2))); + assertThat(processor.getTimezone(), equalTo(ZoneOffset.ofHours(2))); config = new HashMap<>(); config.put("field", "_field"); @@ -71,7 +71,7 @@ public void testSpecifyOptionalSettings() throws Exception { config.put("date_rounding", "y"); processor = factory.create(null, null, null, config); - assertThat(processor.getIndexNamePrefixTemplate().newInstance(Collections.emptyMap()).execute(), Matchers.equalTo("_prefix")); + assertThat(processor.getIndexNamePrefixTemplate().newInstance(Map.of()).execute(), equalTo("_prefix")); } public void testRequiredFields() throws Exception { @@ -79,11 +79,11 @@ public void testRequiredFields() throws Exception { Map config = new HashMap<>(); config.put("date_rounding", "y"); ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, null, null, config)); - assertThat(e.getMessage(), Matchers.equalTo("[field] 
required property is missing")); + assertThat(e.getMessage(), equalTo("[field] required property is missing")); config.clear(); config.put("field", "_field"); e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, null, null, config)); - assertThat(e.getMessage(), Matchers.equalTo("[date_rounding] required property is missing")); + assertThat(e.getMessage(), equalTo("[date_rounding] required property is missing")); } } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java index a79580d743a0..0257bbcd46a7 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateIndexNameProcessorTests.java @@ -15,54 +15,33 @@ import java.time.ZoneId; import java.time.ZoneOffset; import java.time.ZonedDateTime; -import java.util.Collections; import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.function.Function; -import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.equalTo; public class DateIndexNameProcessorTests extends ESTestCase { public void testJavaPattern() throws Exception { Function function = DateFormat.Java.getFunction("yyyy-MM-dd'T'HH:mm:ss.SSSXX", ZoneOffset.UTC, Locale.ROOT); - DateIndexNameProcessor processor = createProcessor( - "_field", - Collections.singletonList(function), - ZoneOffset.UTC, - "events-", - "y", - "yyyyMMdd" - ); - IngestDocument document = new IngestDocument( - "_index", - "_id", - 1, - null, - null, - Collections.singletonMap("_field", "2016-04-25T12:24:20.101Z") - ); + DateIndexNameProcessor processor = createProcessor("_field", List.of(function), ZoneOffset.UTC, "events-", "y", "yyyyMMdd"); + IngestDocument document = new IngestDocument("_index", "_id", 1, null, null, Map.of("_field", "2016-04-25T12:24:20.101Z")); processor.execute(document); assertThat(document.getSourceAndMetadata().get("_index"), equalTo("")); } public void testTAI64N() throws Exception { Function function = DateFormat.Tai64n.getFunction(null, ZoneOffset.UTC, null); - DateIndexNameProcessor dateProcessor = createProcessor( - "_field", - Collections.singletonList(function), - ZoneOffset.UTC, - "events-", - "m", - "yyyyMMdd" - ); + DateIndexNameProcessor dateProcessor = createProcessor("_field", List.of(function), ZoneOffset.UTC, "events-", "m", "yyyyMMdd"); IngestDocument document = new IngestDocument( "_index", "_id", 1, null, null, - Collections.singletonMap("_field", (randomBoolean() ? "@" : "") + "4000000050d506482dbdf024") + Map.of("_field", (randomBoolean() ? 
"@" : "") + "4000000050d506482dbdf024") ); dateProcessor.execute(document); assertThat(document.getSourceAndMetadata().get("_index"), equalTo("")); @@ -70,34 +49,20 @@ public void testTAI64N() throws Exception { public void testUnixMs() throws Exception { Function function = DateFormat.UnixMs.getFunction(null, ZoneOffset.UTC, null); - DateIndexNameProcessor dateProcessor = createProcessor( - "_field", - Collections.singletonList(function), - ZoneOffset.UTC, - "events-", - "m", - "yyyyMMdd" - ); - IngestDocument document = new IngestDocument("_index", "_id", 1, null, null, Collections.singletonMap("_field", "1000500")); + DateIndexNameProcessor dateProcessor = createProcessor("_field", List.of(function), ZoneOffset.UTC, "events-", "m", "yyyyMMdd"); + IngestDocument document = new IngestDocument("_index", "_id", 1, null, null, Map.of("_field", "1000500")); dateProcessor.execute(document); assertThat(document.getSourceAndMetadata().get("_index"), equalTo("")); - document = new IngestDocument("_index", "_id", 1, null, null, Collections.singletonMap("_field", 1000500L)); + document = new IngestDocument("_index", "_id", 1, null, null, Map.of("_field", 1000500L)); dateProcessor.execute(document); assertThat(document.getSourceAndMetadata().get("_index"), equalTo("")); } public void testUnix() throws Exception { Function function = DateFormat.Unix.getFunction(null, ZoneOffset.UTC, null); - DateIndexNameProcessor dateProcessor = createProcessor( - "_field", - Collections.singletonList(function), - ZoneOffset.UTC, - "events-", - "m", - "yyyyMMdd" - ); - IngestDocument document = new IngestDocument("_index", "_id", 1, null, null, Collections.singletonMap("_field", "1000.5")); + DateIndexNameProcessor dateProcessor = createProcessor("_field", List.of(function), ZoneOffset.UTC, "events-", "m", "yyyyMMdd"); + IngestDocument document = new IngestDocument("_index", "_id", 1, null, null, Map.of("_field", "1000.5")); dateProcessor.execute(document); assertThat(document.getSourceAndMetadata().get("_index"), equalTo("")); } @@ -111,14 +76,14 @@ public void testTemplatedFields() throws Exception { DateIndexNameProcessor dateProcessor = createProcessor( "_field", - Collections.singletonList(dateTimeFunction), + List.of(dateTimeFunction), ZoneOffset.UTC, indexNamePrefix, dateRounding, indexNameFormat ); - IngestDocument document = new IngestDocument("_index", "_id", 1, null, null, Collections.singletonMap("_field", date)); + IngestDocument document = new IngestDocument("_index", "_id", 1, null, null, Map.of("_field", date)); dateProcessor.execute(document); assertThat( diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorFactoryTests.java index 5245a336acf4..52aa66c37d6a 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorFactoryTests.java @@ -14,9 +14,8 @@ import org.junit.Before; import java.time.ZoneId; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Locale; import java.util.Map; @@ -36,13 +35,13 @@ public void testBuildDefaults() throws Exception { Map config = new HashMap<>(); String sourceField = randomAlphaOfLengthBetween(1, 10); config.put("field", sourceField); - config.put("formats", Collections.singletonList("dd/MM/yyyyy")); + 
config.put("formats", List.of("dd/MM/yyyyy")); String processorTag = randomAlphaOfLength(10); DateProcessor processor = factory.create(null, processorTag, null, config); assertThat(processor.getTag(), equalTo(processorTag)); assertThat(processor.getField(), equalTo(sourceField)); assertThat(processor.getTargetField(), equalTo(DateProcessor.DEFAULT_TARGET_FIELD)); - assertThat(processor.getFormats(), equalTo(Collections.singletonList("dd/MM/yyyyy"))); + assertThat(processor.getFormats(), equalTo(List.of("dd/MM/yyyyy"))); assertNull(processor.getLocale()); assertNull(processor.getTimezone()); } @@ -51,7 +50,7 @@ public void testMatchFieldIsMandatory() throws Exception { Map config = new HashMap<>(); String targetField = randomAlphaOfLengthBetween(1, 10); config.put("target_field", targetField); - config.put("formats", Collections.singletonList("dd/MM/yyyyy")); + config.put("formats", List.of("dd/MM/yyyyy")); try { factory.create(null, null, null, config); @@ -80,34 +79,34 @@ public void testParseLocale() throws Exception { Map config = new HashMap<>(); String sourceField = randomAlphaOfLengthBetween(1, 10); config.put("field", sourceField); - config.put("formats", Collections.singletonList("dd/MM/yyyyy")); + config.put("formats", List.of("dd/MM/yyyyy")); Locale locale = randomFrom(Locale.GERMANY, Locale.FRENCH, Locale.ROOT); config.put("locale", locale.toLanguageTag()); DateProcessor processor = factory.create(null, null, null, config); - assertThat(processor.getLocale().newInstance(Collections.emptyMap()).execute(), equalTo(locale.toLanguageTag())); + assertThat(processor.getLocale().newInstance(Map.of()).execute(), equalTo(locale.toLanguageTag())); } public void testParseTimezone() throws Exception { Map config = new HashMap<>(); String sourceField = randomAlphaOfLengthBetween(1, 10); config.put("field", sourceField); - config.put("formats", Collections.singletonList("dd/MM/yyyyy")); + config.put("formats", List.of("dd/MM/yyyyy")); ZoneId timezone = randomZone(); config.put("timezone", timezone.getId()); DateProcessor processor = factory.create(null, null, null, config); - assertThat(processor.getTimezone().newInstance(Collections.emptyMap()).execute(), equalTo(timezone.getId())); + assertThat(processor.getTimezone().newInstance(Map.of()).execute(), equalTo(timezone.getId())); } public void testParseMatchFormats() throws Exception { Map config = new HashMap<>(); String sourceField = randomAlphaOfLengthBetween(1, 10); config.put("field", sourceField); - config.put("formats", Arrays.asList("dd/MM/yyyy", "dd-MM-yyyy")); + config.put("formats", List.of("dd/MM/yyyy", "dd-MM-yyyy")); DateProcessor processor = factory.create(null, null, null, config); - assertThat(processor.getFormats(), equalTo(Arrays.asList("dd/MM/yyyy", "dd-MM-yyyy"))); + assertThat(processor.getFormats(), equalTo(List.of("dd/MM/yyyy", "dd-MM-yyyy"))); } public void testParseMatchFormatsFailure() throws Exception { @@ -130,7 +129,7 @@ public void testParseTargetField() throws Exception { String targetField = randomAlphaOfLengthBetween(1, 10); config.put("field", sourceField); config.put("target_field", targetField); - config.put("formats", Arrays.asList("dd/MM/yyyy", "dd-MM-yyyy")); + config.put("formats", List.of("dd/MM/yyyy", "dd-MM-yyyy")); DateProcessor processor = factory.create(null, null, null, config); assertThat(processor.getTargetField(), equalTo(targetField)); @@ -143,7 +142,7 @@ public void testParseOutputFormat() throws Exception { String targetField = randomAlphaOfLengthBetween(1, 10); config.put("field", 
sourceField); config.put("target_field", targetField); - config.put("formats", Arrays.asList("dd/MM/yyyy", "dd-MM-yyyy")); + config.put("formats", List.of("dd/MM/yyyy", "dd-MM-yyyy")); config.put("output_format", outputFormat); DateProcessor processor = factory.create(null, null, null, config); assertThat(processor.getOutputFormat(), equalTo(outputFormat)); @@ -155,7 +154,7 @@ public void testDefaultOutputFormat() throws Exception { String targetField = randomAlphaOfLengthBetween(1, 10); config.put("field", sourceField); config.put("target_field", targetField); - config.put("formats", Arrays.asList("dd/MM/yyyy", "dd-MM-yyyy")); + config.put("formats", List.of("dd/MM/yyyy", "dd-MM-yyyy")); DateProcessor processor = factory.create(null, null, null, config); assertThat(processor.getOutputFormat(), equalTo(DateProcessor.DEFAULT_OUTPUT_FORMAT)); } @@ -167,7 +166,7 @@ public void testInvalidOutputFormatRejected() throws Exception { String targetField = randomAlphaOfLengthBetween(1, 10); config.put("field", sourceField); config.put("target_field", targetField); - config.put("formats", Arrays.asList("dd/MM/yyyy", "dd-MM-yyyy")); + config.put("formats", List.of("dd/MM/yyyy", "dd-MM-yyyy")); config.put("output_format", outputFormat); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> factory.create(null, null, null, config)); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java index be6ff3c5fdef..18454c866cb2 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DateProcessorTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.ingest.common; +import org.elasticsearch.core.Strings; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.RandomDocumentPicks; import org.elasticsearch.ingest.TestTemplateService; @@ -19,15 +20,19 @@ import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.function.Function; +import java.util.function.Supplier; -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class DateProcessorTests extends ESTestCase { @@ -46,7 +51,7 @@ public void testJavaPattern() { templatize(ZoneId.of("Europe/Amsterdam")), templatize(Locale.ENGLISH), "date_as_string", - Collections.singletonList("yyyy dd MM HH:mm:ss"), + List.of("yyyy dd MM HH:mm:ss"), "date_as_date" ); Map document = new HashMap<>(); @@ -129,7 +134,7 @@ public void testJavaPatternNoTimezone() { null, null, "date_as_string", - Arrays.asList("yyyy dd MM HH:mm:ss XXX"), + List.of("yyyy dd MM HH:mm:ss XXX"), "date_as_date" ); @@ -148,7 +153,7 @@ public void testInvalidJavaPattern() { templatize(ZoneOffset.UTC), templatize(randomLocale(random())), "date_as_string", - Collections.singletonList("invalid pattern"), + List.of("invalid pattern"), "date_as_date" ); Map document = new HashMap<>(); @@ 
-169,7 +174,7 @@ public void testJavaPatternLocale() { templatize(ZoneId.of("Europe/Amsterdam")), templatize(Locale.ITALIAN), "date_as_string", - Collections.singletonList("yyyy dd MMMM"), + List.of("yyyy dd MMMM"), "date_as_date" ); Map document = new HashMap<>(); @@ -187,7 +192,7 @@ public void testJavaPatternEnglishLocale() { templatize(ZoneId.of("Europe/Amsterdam")), templatize(Locale.ENGLISH), "date_as_string", - Collections.singletonList("yyyy dd MMMM"), + List.of("yyyy dd MMMM"), "date_as_date" ); Map document = new HashMap<>(); @@ -205,7 +210,7 @@ public void testJavaPatternDefaultYear() { templatize(ZoneId.of("Europe/Amsterdam")), templatize(Locale.ENGLISH), "date_as_string", - Collections.singletonList(format), + List.of(format), "date_as_date" ); Map document = new HashMap<>(); @@ -225,7 +230,7 @@ public void testTAI64N() { templatize(ZoneOffset.ofHours(2)), templatize(randomLocale(random())), "date_as_string", - Collections.singletonList("TAI64N"), + List.of("TAI64N"), "date_as_date" ); Map document = new HashMap<>(); @@ -243,7 +248,7 @@ public void testUnixMs() { templatize(ZoneOffset.UTC), templatize(randomLocale(random())), "date_as_string", - Collections.singletonList("UNIX_MS"), + List.of("UNIX_MS"), "date_as_date" ); Map document = new HashMap<>(); @@ -266,7 +271,7 @@ public void testUnix() { templatize(ZoneOffset.UTC), templatize(randomLocale(random())), "date_as_string", - Collections.singletonList("UNIX"), + List.of("UNIX"), "date_as_date" ); Map document = new HashMap<>(); @@ -283,7 +288,7 @@ public void testInvalidTimezone() { new TestTemplateService.MockTemplateScript.Factory("invalid_timezone"), templatize(randomLocale(random())), "date_as_string", - Collections.singletonList("yyyy"), + List.of("yyyy"), "date_as_date" ); Map document = new HashMap<>(); @@ -303,7 +308,7 @@ public void testInvalidLocale() { templatize(ZoneOffset.UTC), new TestTemplateService.MockTemplateScript.Factory("invalid_locale"), "date_as_string", - Collections.singletonList("yyyy"), + List.of("yyyy"), "date_as_date" ); Map document = new HashMap<>(); @@ -324,7 +329,7 @@ public void testOutputFormat() { null, null, "date_as_string", - Collections.singletonList("iso8601"), + List.of("iso8601"), "date_as_date", "HH:mm:ss.SSSSSSSSS" ); @@ -333,7 +338,34 @@ public void testOutputFormat() { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); processor.execute(ingestDocument); // output format is time only with nanosecond precision - String expectedDate = "00:00:00." + formatted("%09d", nanosAfterEpoch); + String expectedDate = "00:00:00." 
+ Strings.format("%09d", nanosAfterEpoch); assertThat(ingestDocument.getFieldValue("date_as_date", String.class), equalTo(expectedDate)); } + + @SuppressWarnings("unchecked") + public void testCacheIsEvictedAfterReachMaxCapacity() { + Supplier<Function<String, ZonedDateTime>> supplier1 = mock(Supplier.class); + Supplier<Function<String, ZonedDateTime>> supplier2 = mock(Supplier.class); + Function<String, ZonedDateTime> zonedDateTimeFunction1 = str -> ZonedDateTime.now(); + Function<String, ZonedDateTime> zonedDateTimeFunction2 = str -> ZonedDateTime.now(); + var cache = new DateProcessor.Cache(1); + var key1 = new DateProcessor.Cache.Key("format-1", ZoneId.systemDefault(), Locale.ROOT); + var key2 = new DateProcessor.Cache.Key("format-2", ZoneId.systemDefault(), Locale.ROOT); + + when(supplier1.get()).thenReturn(zonedDateTimeFunction1); + when(supplier2.get()).thenReturn(zonedDateTimeFunction2); + + assertEquals(cache.getOrCompute(key1, supplier1), zonedDateTimeFunction1); // 1 call to supplier1 + assertEquals(cache.getOrCompute(key2, supplier2), zonedDateTimeFunction2); // 1 call to supplier2 + assertEquals(cache.getOrCompute(key1, supplier1), zonedDateTimeFunction1); // 1 more call to supplier1 + assertEquals(cache.getOrCompute(key1, supplier1), zonedDateTimeFunction1); // should use cached value + assertEquals(cache.getOrCompute(key2, supplier2), zonedDateTimeFunction2); // 1 more call to supplier2 + assertEquals(cache.getOrCompute(key2, supplier2), zonedDateTimeFunction2); // should use cached value + assertEquals(cache.getOrCompute(key2, supplier2), zonedDateTimeFunction2); // should use cached value + assertEquals(cache.getOrCompute(key2, supplier2), zonedDateTimeFunction2); // should use cached value + assertEquals(cache.getOrCompute(key1, supplier1), zonedDateTimeFunction1); // 1 more call to supplier1 + + verify(supplier1, times(3)).get(); + verify(supplier2, times(2)).get(); + } } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DissectProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DissectProcessorFactoryTests.java index c426ee1ef8be..b4bd9fea704b 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DissectProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DissectProcessorFactoryTests.java @@ -12,14 +12,13 @@ import org.elasticsearch.dissect.DissectException; import org.elasticsearch.ingest.RandomDocumentPicks; import org.elasticsearch.test.ESTestCase; -import org.hamcrest.Matchers; import java.util.HashMap; import java.util.Map; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; public class DissectProcessorFactoryTests extends ESTestCase { @@ -50,7 +49,7 @@ public void testCreateMissingField() { Map<String, Object> config = new HashMap<>(); config.put("pattern", "%{a},%{b},%{c}"); Exception e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, "_tag", null, config)); -
assertThat(e.getMessage(), Matchers.equalTo("[pattern] required property is missing")); + assertThat(e.getMessage(), equalTo("[pattern] required property is missing")); } public void testCreateMissingOptionals() { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DissectProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DissectProcessorTests.java index ec2762ebfe53..f57d82a7c0d1 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DissectProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DissectProcessorTests.java @@ -14,12 +14,13 @@ import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.RandomDocumentPicks; import org.elasticsearch.test.ESTestCase; -import org.hamcrest.CoreMatchers; import java.util.Collections; import java.util.HashMap; +import java.util.Map; import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; /** @@ -29,14 +30,7 @@ public class DissectProcessorTests extends ESTestCase { public void testMatch() { - IngestDocument ingestDocument = new IngestDocument( - "_index", - "_id", - 1, - null, - null, - Collections.singletonMap("message", "foo,bar,baz") - ); + IngestDocument ingestDocument = new IngestDocument("_index", "_id", 1, null, null, Map.of("message", "foo,bar,baz")); DissectProcessor dissectProcessor = new DissectProcessor("", null, "message", "%{a},%{b},%{c}", "", true); dissectProcessor.execute(ingestDocument); assertThat(ingestDocument.getFieldValue("a", String.class), equalTo("foo")); @@ -68,7 +62,7 @@ public void testAdvancedMatch() { 1, null, null, - Collections.singletonMap("message", "foo bar,,,,,,,baz nope:notagain 😊 🐇 🙃") + Map.of("message", "foo bar,,,,,,,baz nope:notagain 😊 🐇 🙃") ); DissectProcessor dissectProcessor = new DissectProcessor( "", @@ -87,17 +81,10 @@ public void testAdvancedMatch() { } public void testMiss() { - IngestDocument ingestDocument = new IngestDocument( - "_index", - "_id", - 1, - null, - null, - Collections.singletonMap("message", "foo:bar,baz") - ); + IngestDocument ingestDocument = new IngestDocument("_index", "_id", 1, null, null, Map.of("message", "foo:bar,baz")); DissectProcessor dissectProcessor = new DissectProcessor("", null, "message", "%{a},%{b},%{c}", "", true); DissectException e = expectThrows(DissectException.class, () -> dissectProcessor.execute(ingestDocument)); - assertThat(e.getMessage(), CoreMatchers.containsString("Unable to find match for dissect pattern")); + assertThat(e.getMessage(), containsString("Unable to find match for dissect pattern")); } public void testNonStringValueWithIgnoreMissing() { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java index fc17506555ed..3e3c7af96486 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.ingest.TestTemplateService; import org.elasticsearch.test.ESTestCase; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -43,7 +42,7 @@ public void testEscapeFields() throws Exception { 
source = new HashMap<>(); source.put("foo.bar", "baz1"); - source.put("foo", new HashMap<>(Collections.singletonMap("bar", "baz2"))); + source.put("foo", new HashMap<>(Map.of("bar", "baz2"))); document = TestIngestDocument.withDefaultVersion(source); processor = new DotExpanderProcessor("_tag", null, null, "foo.bar"); processor.execute(document); @@ -55,7 +54,7 @@ public void testEscapeFields() throws Exception { source = new HashMap<>(); source.put("foo.bar", "2"); - source.put("foo", new HashMap<>(Collections.singletonMap("bar", 1))); + source.put("foo", new HashMap<>(Map.of("bar", 1))); document = TestIngestDocument.withDefaultVersion(source); processor = new DotExpanderProcessor("_tag", null, null, "foo.bar"); processor.execute(document); @@ -103,7 +102,7 @@ public void testEscapeFields_valueField() throws Exception { source = new HashMap<>(); source.put("foo.bar.baz", "baz1"); - source.put("foo", new HashMap<>(Collections.singletonMap("bar", new HashMap<>()))); + source.put("foo", new HashMap<>(Map.of("bar", new HashMap<>()))); document = TestIngestDocument.withDefaultVersion(source); processor = new DotExpanderProcessor("_tag", null, null, "foo.bar.baz"); processor.execute(document); @@ -113,7 +112,7 @@ public void testEscapeFields_valueField() throws Exception { source = new HashMap<>(); source.put("foo.bar.baz", "baz1"); - source.put("foo", new HashMap<>(Collections.singletonMap("bar", "baz2"))); + source.put("foo", new HashMap<>(Map.of("bar", "baz2"))); IngestDocument document2 = TestIngestDocument.withDefaultVersion(source); Processor processor2 = new DotExpanderProcessor("_tag", null, null, "foo.bar.baz"); e = expectThrows(IllegalArgumentException.class, () -> processor2.execute(document2)); @@ -122,7 +121,7 @@ public void testEscapeFields_valueField() throws Exception { public void testEscapeFields_path() throws Exception { Map source = new HashMap<>(); - source.put("foo", new HashMap<>(Collections.singletonMap("bar.baz", "value"))); + source.put("foo", new HashMap<>(Map.of("bar.baz", "value"))); IngestDocument document = TestIngestDocument.withDefaultVersion(source); DotExpanderProcessor processor = new DotExpanderProcessor("_tag", null, "foo", "bar.baz"); processor.execute(document); @@ -131,7 +130,7 @@ public void testEscapeFields_path() throws Exception { assertThat(document.getFieldValue("foo.bar.baz", String.class), equalTo("value")); source = new HashMap<>(); - source.put("field", new HashMap<>(Collections.singletonMap("foo.bar.baz", "value"))); + source.put("field", new HashMap<>(Map.of("foo.bar.baz", "value"))); document = TestIngestDocument.withDefaultVersion(source); processor = new DotExpanderProcessor("_tag", null, "field", "foo.bar.baz"); processor.execute(document); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/FailProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/FailProcessorFactoryTests.java index 4cf6ce1df10e..ad73ef65c6c9 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/FailProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/FailProcessorFactoryTests.java @@ -14,11 +14,10 @@ import org.elasticsearch.test.ESTestCase; import org.junit.Before; -import java.util.Collections; import java.util.HashMap; import java.util.Map; -import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.equalTo; public class FailProcessorFactoryTests extends ESTestCase { @@ -35,7 +34,7 @@ public 
void testCreate() throws Exception { String processorTag = randomAlphaOfLength(10); FailProcessor failProcessor = factory.create(null, processorTag, null, config); assertThat(failProcessor.getTag(), equalTo(processorTag)); - assertThat(failProcessor.getMessage().newInstance(Collections.emptyMap()).execute(), equalTo("error")); + assertThat(failProcessor.getMessage().newInstance(Map.of()).execute(), equalTo("error")); } public void testCreateMissingMessageField() throws Exception { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/FingerprintProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/FingerprintProcessorFactoryTests.java index e7ec74520046..f9f808a0fe84 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/FingerprintProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/FingerprintProcessorFactoryTests.java @@ -14,7 +14,6 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; -import java.util.Arrays; import java.util.Comparator; import java.util.HashMap; import java.util.List; @@ -75,7 +74,7 @@ public void testMethod() throws Exception { // invalid method String invalidMethod = randomValueOtherThanMany( - m -> Arrays.asList(FingerprintProcessor.Factory.SUPPORTED_DIGESTS).contains(m), + m -> List.of(FingerprintProcessor.Factory.SUPPORTED_DIGESTS).contains(m), () -> randomAlphaOfLengthBetween(5, 9) ); config.put("fields", fieldList); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java index e5bd1910aa92..233d8fb2b860 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java @@ -13,20 +13,20 @@ import org.elasticsearch.ingest.TestProcessor; import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; -import org.hamcrest.Matchers; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; -import java.util.function.Consumer; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; public class ForEachProcessorFactoryTests extends ESTestCase { private final ScriptService scriptService = mock(ScriptService.class); - private final Consumer genericExecutor = Runnable::run; public void testCreate() throws Exception { Processor processor = new TestProcessor(ingestDocument -> {}); @@ -36,11 +36,11 @@ public void testCreate() throws Exception { Map config = new HashMap<>(); config.put("field", "_field"); - config.put("processor", Collections.singletonMap("_name", Collections.emptyMap())); + config.put("processor", Map.of("_name", Collections.emptyMap())); ForEachProcessor forEachProcessor = forEachFactory.create(registry, null, null, config); - assertThat(forEachProcessor, Matchers.notNullValue()); + assertThat(forEachProcessor, notNullValue()); assertThat(forEachProcessor.getField(), equalTo("_field")); - assertThat(forEachProcessor.getInnerProcessor(), Matchers.sameInstance(processor)); + assertThat(forEachProcessor.getInnerProcessor(), sameInstance(processor)); 
assertFalse(forEachProcessor.isIgnoreMissing()); } @@ -52,12 +52,12 @@ public void testSetIgnoreMissing() throws Exception { Map config = new HashMap<>(); config.put("field", "_field"); - config.put("processor", Collections.singletonMap("_name", Collections.emptyMap())); + config.put("processor", Map.of("_name", Collections.emptyMap())); config.put("ignore_missing", true); ForEachProcessor forEachProcessor = forEachFactory.create(registry, null, null, config); - assertThat(forEachProcessor, Matchers.notNullValue()); + assertThat(forEachProcessor, notNullValue()); assertThat(forEachProcessor.getField(), equalTo("_field")); - assertThat(forEachProcessor.getInnerProcessor(), Matchers.sameInstance(processor)); + assertThat(forEachProcessor.getInnerProcessor(), sameInstance(processor)); assertTrue(forEachProcessor.isIgnoreMissing()); } @@ -71,8 +71,8 @@ public void testCreateWithTooManyProcessorTypes() throws Exception { Map config = new HashMap<>(); config.put("field", "_field"); Map processorTypes = new HashMap<>(); - processorTypes.put("_first", Collections.emptyMap()); - processorTypes.put("_second", Collections.emptyMap()); + processorTypes.put("_first", Map.of()); + processorTypes.put("_second", Map.of()); config.put("processor", processorTypes); Exception exception = expectThrows(ElasticsearchParseException.class, () -> forEachFactory.create(registry, null, null, config)); assertThat(exception.getMessage(), equalTo("[processor] Must specify exactly one processor type")); @@ -82,10 +82,10 @@ public void testCreateWithNonExistingProcessorType() throws Exception { ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService); Map config = new HashMap<>(); config.put("field", "_field"); - config.put("processor", Collections.singletonMap("_name", Collections.emptyMap())); + config.put("processor", Map.of("_name", Collections.emptyMap())); Exception expectedException = expectThrows( ElasticsearchParseException.class, - () -> forEachFactory.create(Collections.emptyMap(), null, null, config) + () -> forEachFactory.create(Map.of(), null, null, config) ); assertThat(expectedException.getMessage(), equalTo("No processor type exists with name [_name]")); } @@ -96,7 +96,7 @@ public void testCreateWithMissingField() throws Exception { registry.put("_name", (r, t, description, c) -> processor); ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService); Map config = new HashMap<>(); - config.put("processor", Collections.singletonList(Collections.singletonMap("_name", Collections.emptyMap()))); + config.put("processor", List.of(Map.of("_name", Map.of()))); Exception exception = expectThrows(Exception.class, () -> forEachFactory.create(registry, null, null, config)); assertThat(exception.getMessage(), equalTo("[field] required property is missing")); } @@ -105,7 +105,7 @@ public void testCreateWithMissingProcessor() { ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService); Map config = new HashMap<>(); config.put("field", "_field"); - Exception exception = expectThrows(Exception.class, () -> forEachFactory.create(Collections.emptyMap(), null, null, config)); + Exception exception = expectThrows(Exception.class, () -> forEachFactory.create(Map.of(), null, null, config)); assertThat(exception.getMessage(), equalTo("[processor] required property is missing")); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java 
b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java index 0d96e4c6680b..5f9936bd0c96 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorTests.java @@ -17,8 +17,6 @@ import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -27,7 +25,6 @@ import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; -import java.util.stream.Collectors; import java.util.stream.IntStream; import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument; @@ -41,7 +38,7 @@ public void testExecuteWithAsyncProcessor() throws Exception { values.add("foo"); values.add("bar"); values.add("baz"); - IngestDocument ingestDocument = new IngestDocument("_index", "_id", 1, null, null, Collections.singletonMap("values", values)); + IngestDocument ingestDocument = new IngestDocument("_index", "_id", 1, null, null, Map.of("values", values)); ForEachProcessor processor = new ForEachProcessor("_tag", null, "values", new AsyncUpperCaseProcessor("_ingest._value"), false); execProcessor(processor, ingestDocument, (result, e) -> {}); @@ -57,14 +54,7 @@ public void testExecuteWithAsyncProcessor() throws Exception { } public void testExecuteWithFailure() { - IngestDocument ingestDocument = new IngestDocument( - "_index", - "_id", - 1, - null, - null, - Collections.singletonMap("values", Arrays.asList("a", "b", "c")) - ); + IngestDocument ingestDocument = new IngestDocument("_index", "_id", 1, null, null, Map.of("values", List.of("a", "b", "c"))); TestProcessor testProcessor = new TestProcessor(id -> { if ("c".equals(id.getFieldValue("_ingest._value", String.class))) { @@ -76,7 +66,7 @@ public void testExecuteWithFailure() { execProcessor(processor, ingestDocument, (result, e) -> exceptions[0] = e); assertThat(exceptions[0].getMessage(), equalTo("failure")); assertThat(testProcessor.getInvokedCounter(), equalTo(3)); - assertThat(ingestDocument.getFieldValue("values", List.class), equalTo(Arrays.asList("a", "b", "c"))); + assertThat(ingestDocument.getFieldValue("values", List.class), equalTo(List.of("a", "b", "c"))); testProcessor = new TestProcessor(id -> { String value = id.getFieldValue("_ingest._value", String.class); @@ -91,12 +81,12 @@ public void testExecuteWithFailure() { "_tag", null, "values", - new CompoundProcessor(false, Arrays.asList(testProcessor), Arrays.asList(onFailureProcessor)), + new CompoundProcessor(false, List.of(testProcessor), List.of(onFailureProcessor)), false ); execProcessor(processor, ingestDocument, (result, e) -> {}); assertThat(testProcessor.getInvokedCounter(), equalTo(3)); - assertThat(ingestDocument.getFieldValue("values", List.class), equalTo(Arrays.asList("A", "B", "c"))); + assertThat(ingestDocument.getFieldValue("values", List.class), equalTo(List.of("A", "B", "c"))); assertThat(ingestDocument.getFieldValue("error", String.class), equalTo("foo")); } @@ -104,7 +94,7 @@ public void testMetadataAvailable() { List> values = new ArrayList<>(); values.add(new HashMap<>()); values.add(new HashMap<>()); - IngestDocument ingestDocument = new IngestDocument("_index", "_id", 1, null, null, Collections.singletonMap("values", values)); + IngestDocument ingestDocument = new IngestDocument("_index", "_id", 1, 
null, null, Map.of("values", values)); TestProcessor innerProcessor = new TestProcessor(id -> { id.setFieldValue("_ingest._value.index", id.getSourceAndMetadata().get("_index")); @@ -180,9 +170,9 @@ public String getDescription() { } }; int numValues = randomIntBetween(1, 10000); - List values = IntStream.range(0, numValues).mapToObj(i -> "").collect(Collectors.toList()); + List values = IntStream.range(0, numValues).mapToObj(i -> "").toList(); - IngestDocument ingestDocument = new IngestDocument("_index", "_id", 1, null, null, Collections.singletonMap("values", values)); + IngestDocument ingestDocument = new IngestDocument("_index", "_id", 1, null, null, Map.of("values", values)); ForEachProcessor processor = new ForEachProcessor("_tag", null, "values", innerProcessor, false); execProcessor(processor, ingestDocument, (result, e) -> {}); @@ -198,7 +188,7 @@ public void testModifyFieldsOutsideArray() { values.add("string"); values.add(1); values.add(null); - IngestDocument ingestDocument = new IngestDocument("_index", "_id", 1, null, null, Collections.singletonMap("values", values)); + IngestDocument ingestDocument = new IngestDocument("_index", "_id", 1, null, null, Map.of("values", values)); TemplateScript.Factory template = new TestTemplateService.MockTemplateScript.Factory("errors"); @@ -209,7 +199,7 @@ public void testModifyFieldsOutsideArray() { new CompoundProcessor( false, List.of(new UppercaseProcessor("_tag_upper", null, "_ingest._value", false, "_ingest._value")), - List.of(new AppendProcessor("_tag", null, template, (model) -> (Collections.singletonList("added")), true)) + List.of(new AppendProcessor("_tag", null, template, (model) -> (List.of("added")), true)) ), false ); @@ -262,7 +252,7 @@ public void testNestedForEach() { value.put("values2", innerValues); values.add(value); - IngestDocument ingestDocument = new IngestDocument("_index", "_id", 1, null, null, Collections.singletonMap("values1", values)); + IngestDocument ingestDocument = new IngestDocument("_index", "_id", 1, null, null, Map.of("values1", values)); TestProcessor testProcessor = new TestProcessor( doc -> doc.setFieldValue("_ingest._value", doc.getFieldValue("_ingest._value", String.class).toUpperCase(Locale.ENGLISH)) @@ -337,7 +327,7 @@ public void testNestedForEachWithMapIteration() { } public void testIgnoreMissing() { - IngestDocument originalIngestDocument = new IngestDocument("_index", "_id", 1, null, null, Collections.emptyMap()); + IngestDocument originalIngestDocument = new IngestDocument("_index", "_id", 1, null, null, Map.of()); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); TestProcessor testProcessor = new TestProcessor(doc -> {}); ForEachProcessor processor = new ForEachProcessor("_tag", null, "_ingest._value", testProcessor, true); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorFactoryTests.java index 300f58de884e..41154634b43e 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorFactoryTests.java @@ -12,8 +12,8 @@ import org.elasticsearch.grok.MatcherWatchdog; import org.elasticsearch.test.ESTestCase; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.equalTo; @@ -27,7 +27,7 @@ public void 
testBuild() throws Exception { Map config = new HashMap<>(); config.put("field", "_field"); - config.put("patterns", Collections.singletonList("(?\\w+)")); + config.put("patterns", List.of("(?\\w+)")); String processorTag = randomAlphaOfLength(10); GrokProcessor processor = factory.create(null, processorTag, null, config); assertThat(processor.getTag(), equalTo(processorTag)); @@ -41,7 +41,7 @@ public void testBuildWithIgnoreMissing() throws Exception { Map config = new HashMap<>(); config.put("field", "_field"); - config.put("patterns", Collections.singletonList("(?\\w+)")); + config.put("patterns", List.of("(?\\w+)")); config.put("ignore_missing", true); String processorTag = randomAlphaOfLength(10); GrokProcessor processor = factory.create(null, processorTag, null, config); @@ -54,7 +54,7 @@ public void testBuildWithIgnoreMissing() throws Exception { public void testBuildMissingField() throws Exception { GrokProcessor.Factory factory = new GrokProcessor.Factory(MatcherWatchdog.noop()); Map config = new HashMap<>(); - config.put("patterns", Collections.singletonList("(?\\w+)")); + config.put("patterns", List.of("(?\\w+)")); ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, null, null, config)); assertThat(e.getMessage(), equalTo("[field] required property is missing")); } @@ -71,7 +71,7 @@ public void testBuildEmptyPatternsList() throws Exception { GrokProcessor.Factory factory = new GrokProcessor.Factory(MatcherWatchdog.noop()); Map config = new HashMap<>(); config.put("field", "foo"); - config.put("patterns", Collections.emptyList()); + config.put("patterns", List.of()); ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, null, null, config)); assertThat(e.getMessage(), equalTo("[patterns] List of patterns must not be empty")); } @@ -81,8 +81,8 @@ public void testCreateWithCustomPatterns() throws Exception { Map config = new HashMap<>(); config.put("field", "_field"); - config.put("patterns", Collections.singletonList("%{MY_PATTERN:name}!")); - config.put("pattern_definitions", Collections.singletonMap("MY_PATTERN", "foo")); + config.put("patterns", List.of("%{MY_PATTERN:name}!")); + config.put("pattern_definitions", Map.of("MY_PATTERN", "foo")); GrokProcessor processor = factory.create(null, null, null, config); assertThat(processor.getMatchField(), equalTo("_field")); assertThat(processor.getGrok(), notNullValue()); @@ -93,7 +93,7 @@ public void testCreateWithInvalidPattern() throws Exception { GrokProcessor.Factory factory = new GrokProcessor.Factory(MatcherWatchdog.noop()); Map config = new HashMap<>(); config.put("field", "_field"); - config.put("patterns", Collections.singletonList("[")); + config.put("patterns", List.of("[")); ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, null, null, config)); assertThat(e.getMessage(), equalTo("[patterns] Invalid regex pattern found in: [[]. 
premature end of char-class")); } @@ -102,8 +102,8 @@ public void testCreateWithInvalidPatternDefinition() throws Exception { GrokProcessor.Factory factory = new GrokProcessor.Factory(MatcherWatchdog.noop()); Map config = new HashMap<>(); config.put("field", "_field"); - config.put("patterns", Collections.singletonList("%{MY_PATTERN:name}!")); - config.put("pattern_definitions", Collections.singletonMap("MY_PATTERN", "[")); + config.put("patterns", List.of("%{MY_PATTERN:name}!")); + config.put("pattern_definitions", Map.of("MY_PATTERN", "[")); ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, null, null, config)); assertThat( e.getMessage(), @@ -115,7 +115,7 @@ public void testCreateWithInvalidEcsCompatibilityMode() throws Exception { GrokProcessor.Factory factory = new GrokProcessor.Factory(MatcherWatchdog.noop()); Map config = new HashMap<>(); config.put("field", "_field"); - config.put("patterns", Collections.singletonList("(?\\w+)")); + config.put("patterns", List.of("(?\\w+)")); String invalidEcsMode = randomAlphaOfLength(3); config.put("ecs_compatibility", invalidEcsMode); ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create(null, null, null, config)); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorGetActionTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorGetActionTests.java index bf7b18814ca4..75f6d7a61735 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorGetActionTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorGetActionTests.java @@ -26,11 +26,11 @@ import java.util.List; import java.util.Map; -import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; -import static org.hamcrest.core.IsNull.notNullValue; -import static org.hamcrest.core.IsNull.nullValue; import static org.mockito.Mockito.mock; public class GrokProcessorGetActionTests extends ESTestCase { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java index bc8efeb32b56..5d568b7976fa 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java @@ -14,9 +14,8 @@ import org.elasticsearch.ingest.TestIngestDocument; import org.elasticsearch.test.ESTestCase; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument; @@ -31,8 +30,8 @@ public void testMatch() throws Exception { GrokProcessor processor = new GrokProcessor( randomAlphaOfLength(10), null, - Collections.singletonMap("ONE", "1"), - Collections.singletonList("%{ONE:one}"), + Map.of("ONE", "1"), + List.of("%{ONE:one}"), fieldName, false, false, @@ -49,8 +48,8 @@ public void testIgnoreCase() throws Exception { GrokProcessor processor = new GrokProcessor( randomAlphaOfLength(10), null, - Collections.emptyMap(), - 
Collections.singletonList("(?(?i)A)"), + Map.of(), + List.of("(?(?i)A)"), fieldName, false, false, @@ -67,8 +66,8 @@ public void testNoMatch() { GrokProcessor processor = new GrokProcessor( randomAlphaOfLength(10), null, - Collections.singletonMap("ONE", "1"), - Collections.singletonList("%{ONE:one}"), + Map.of("ONE", "1"), + List.of("%{ONE:one}"), fieldName, false, false, @@ -87,8 +86,8 @@ public void testNoMatchingPatternName() { () -> new GrokProcessor( randomAlphaOfLength(10), null, - Collections.singletonMap("ONE", "1"), - Collections.singletonList("%{NOTONE:not_one}"), + Map.of("ONE", "1"), + List.of("%{NOTONE:not_one}"), fieldName, false, false, @@ -106,8 +105,8 @@ public void testMatchWithoutCaptures() throws Exception { GrokProcessor processor = new GrokProcessor( randomAlphaOfLength(10), null, - Collections.emptyMap(), - Collections.singletonList(fieldName), + Map.of(), + List.of(fieldName), fieldName, false, false, @@ -124,8 +123,8 @@ public void testNullField() { GrokProcessor processor = new GrokProcessor( randomAlphaOfLength(10), null, - Collections.singletonMap("ONE", "1"), - Collections.singletonList("%{ONE:one}"), + Map.of("ONE", "1"), + List.of("%{ONE:one}"), fieldName, false, false, @@ -143,8 +142,8 @@ public void testNullFieldWithIgnoreMissing() throws Exception { GrokProcessor processor = new GrokProcessor( randomAlphaOfLength(10), null, - Collections.singletonMap("ONE", "1"), - Collections.singletonList("%{ONE:one}"), + Map.of("ONE", "1"), + List.of("%{ONE:one}"), fieldName, false, true, @@ -161,8 +160,8 @@ public void testNotStringField() { GrokProcessor processor = new GrokProcessor( randomAlphaOfLength(10), null, - Collections.singletonMap("ONE", "1"), - Collections.singletonList("%{ONE:one}"), + Map.of("ONE", "1"), + List.of("%{ONE:one}"), fieldName, false, false, @@ -179,8 +178,8 @@ public void testNotStringFieldWithIgnoreMissing() { GrokProcessor processor = new GrokProcessor( randomAlphaOfLength(10), null, - Collections.singletonMap("ONE", "1"), - Collections.singletonList("%{ONE:one}"), + Map.of("ONE", "1"), + List.of("%{ONE:one}"), fieldName, false, true, @@ -196,8 +195,8 @@ public void testMissingField() { GrokProcessor processor = new GrokProcessor( randomAlphaOfLength(10), null, - Collections.singletonMap("ONE", "1"), - Collections.singletonList("%{ONE:one}"), + Map.of("ONE", "1"), + List.of("%{ONE:one}"), fieldName, false, false, @@ -214,8 +213,8 @@ public void testMissingFieldWithIgnoreMissing() throws Exception { GrokProcessor processor = new GrokProcessor( randomAlphaOfLength(10), null, - Collections.singletonMap("ONE", "1"), - Collections.singletonList("%{ONE:one}"), + Map.of("ONE", "1"), + List.of("%{ONE:one}"), fieldName, false, true, @@ -237,7 +236,7 @@ public void testMultiplePatternsWithMatchReturn() throws Exception { randomAlphaOfLength(10), null, patternBank, - Arrays.asList("%{ONE:one}", "%{TWO:two}", "%{THREE:three}"), + List.of("%{ONE:one}", "%{TWO:two}", "%{THREE:three}"), fieldName, false, false, @@ -261,7 +260,7 @@ public void testSetMetadata() throws Exception { randomAlphaOfLength(10), null, patternBank, - Arrays.asList("%{ONE:one}", "%{TWO:two}", "%{THREE:three}"), + List.of("%{ONE:one}", "%{TWO:two}", "%{THREE:three}"), fieldName, true, false, @@ -284,7 +283,7 @@ public void testTraceWithOnePattern() throws Exception { randomAlphaOfLength(10), null, patternBank, - Arrays.asList("%{ONE:one}"), + List.of("%{ONE:one}"), fieldName, true, false, @@ -297,17 +296,17 @@ public void testTraceWithOnePattern() throws Exception { public void 
testCombinedPatterns() { String combined; - combined = GrokProcessor.combinePatterns(Arrays.asList(""), false); + combined = GrokProcessor.combinePatterns(List.of(""), false); assertThat(combined, equalTo("")); - combined = GrokProcessor.combinePatterns(Arrays.asList(""), true); + combined = GrokProcessor.combinePatterns(List.of(""), true); assertThat(combined, equalTo("")); - combined = GrokProcessor.combinePatterns(Arrays.asList("foo"), false); + combined = GrokProcessor.combinePatterns(List.of("foo"), false); assertThat(combined, equalTo("foo")); - combined = GrokProcessor.combinePatterns(Arrays.asList("foo"), true); + combined = GrokProcessor.combinePatterns(List.of("foo"), true); assertThat(combined, equalTo("foo")); - combined = GrokProcessor.combinePatterns(Arrays.asList("foo", "bar"), false); + combined = GrokProcessor.combinePatterns(List.of("foo", "bar"), false); assertThat(combined, equalTo("(?:foo)|(?:bar)")); - combined = GrokProcessor.combinePatterns(Arrays.asList("foo", "bar"), true); + combined = GrokProcessor.combinePatterns(List.of("foo", "bar"), true); assertThat(combined, equalTo("(?<_ingest._grok_match_index.0>foo)|(?<_ingest._grok_match_index.1>bar)")); } @@ -323,7 +322,7 @@ public void testCombineSamePatternNameAcrossPatterns() throws Exception { randomAlphaOfLength(10), null, patternBank, - Arrays.asList("%{ONE:first}-%{TWO:second}", "%{ONE:first}-%{THREE:second}"), + List.of("%{ONE:first}-%{TWO:second}", "%{ONE:first}-%{THREE:second}"), fieldName, randomBoolean(), randomBoolean(), @@ -334,7 +333,7 @@ public void testCombineSamePatternNameAcrossPatterns() throws Exception { assertThat(doc.getFieldValue("second", String.class), equalTo("3")); } - public void testFirstWinNamedCapture() throws Exception { + public void testShouldCaptureAllSameNameGroupsAsList() throws Exception { String fieldName = RandomDocumentPicks.randomFieldName(random()); IngestDocument doc = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); doc.setFieldValue(fieldName, "12"); @@ -344,14 +343,14 @@ public void testFirstWinNamedCapture() throws Exception { randomAlphaOfLength(10), null, patternBank, - Collections.singletonList("%{ONETWO:first}%{ONETWO:first}"), + List.of("%{ONETWO:first}%{ONETWO:first}"), fieldName, randomBoolean(), randomBoolean(), MatcherWatchdog.noop() ); processor.execute(doc); - assertThat(doc.getFieldValue("first", String.class), equalTo("1")); + assertEquals(doc.getFieldValue("first", List.class), List.of("1", "2")); } public void testUnmatchedNamesNotIncludedInDocument() throws Exception { @@ -365,7 +364,7 @@ public void testUnmatchedNamesNotIncludedInDocument() throws Exception { randomAlphaOfLength(10), null, patternBank, - Collections.singletonList("%{ONETWO:first}|%{THREE:second}"), + List.of("%{ONETWO:first}|%{THREE:second}"), fieldName, randomBoolean(), randomBoolean(), diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorFactoryTests.java index 0e0d13cb9269..ce5a9e798741 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorFactoryTests.java @@ -13,8 +13,8 @@ import java.util.HashMap; import java.util.Map; -import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; public class 
GsubProcessorFactoryTests extends AbstractStringProcessorFactoryTestCase { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JoinProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JoinProcessorFactoryTests.java index 25303a42cf24..8f768f9c01bb 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JoinProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JoinProcessorFactoryTests.java @@ -14,7 +14,7 @@ import java.util.HashMap; import java.util.Map; -import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.equalTo; public class JoinProcessorFactoryTests extends ESTestCase { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorFactoryTests.java index c6d7d9deff80..02c936bb4b62 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorFactoryTests.java @@ -10,12 +10,14 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.test.ESTestCase; import java.util.HashMap; import java.util.Map; -import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; public class JsonProcessorFactoryTests extends ESTestCase { @@ -68,6 +70,29 @@ public void testCreateWithMissingField() throws Exception { assertThat(exception.getMessage(), equalTo("[field] required property is missing")); } + public void testCreateWithStrictParsingParameter() throws Exception { + String fieldName = randomAlphaOfLength(10); + String processorTag = randomAlphaOfLength(10); + IngestDocument document = new IngestDocument("_index", "_id", 1, null, null, Map.of(fieldName, "123 \"foo\"")); + + { + Map strictConfig = new HashMap<>(); + strictConfig.put("field", fieldName); + JsonProcessor strictProcessor = FACTORY.create(null, processorTag, null, strictConfig); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> strictProcessor.execute(document)); + assertThat(exception.getMessage(), containsString("is not valid JSON and the strict_json_parsing parameter is true")); + } + + { + Map lenientConfig = new HashMap<>(); + lenientConfig.put("field", fieldName); + lenientConfig.put("strict_json_parsing", false); + JsonProcessor lenientProcessor = FACTORY.create(null, processorTag, null, lenientConfig); + IngestDocument result = lenientProcessor.execute(document); + assertThat(result.getSource().get(fieldName), equalTo(123)); + } + } + public void testCreateWithBothTargetFieldAndAddToRoot() throws Exception { String randomField = randomAlphaOfLength(10); String randomTargetField = randomAlphaOfLength(5); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorTests.java index a9596fb0083a..068a93d438a1 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/JsonProcessorTests.java @@ -17,15 +17,17 
@@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; -import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import static org.elasticsearch.ingest.common.JsonProcessor.ConflictStrategy.MERGE; import static org.elasticsearch.ingest.common.JsonProcessor.ConflictStrategy.REPLACE; +import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; public class JsonProcessorTests extends ESTestCase { @@ -128,7 +130,7 @@ public void testString() throws Exception { public void testArray() throws Exception { JsonProcessor jsonProcessor = new JsonProcessor("tag", null, "field", "target_field", false, REPLACE, false); Map document = new HashMap<>(); - List value = Arrays.asList(true, true, false); + List value = List.of(true, true, false); document.put("field", value.toString()); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); jsonProcessor.execute(ingestDocument); @@ -235,4 +237,86 @@ public void testAddBoolToRoot() { Exception exception = expectThrows(IllegalArgumentException.class, () -> jsonProcessor.execute(ingestDocument)); assertThat(exception.getMessage(), containsString("cannot add non-map fields to root of document")); } + + @SuppressWarnings({ "unchecked", "rawtypes" }) + public void testApply() { + { + Object result = JsonProcessor.apply("{\"foo\":\"bar\"}", true, true); + assertThat(result, instanceOf(Map.class)); + Map resultMap = (Map) result; + assertThat(resultMap.size(), equalTo(1)); + assertThat(resultMap.get("foo"), equalTo("bar")); + } + { + Object result = JsonProcessor.apply("\"foo\"", true, true); + assertThat(result, instanceOf(String.class)); + assertThat(result, equalTo("foo")); + } + { + boolean boolValue = randomBoolean(); + Object result = JsonProcessor.apply(Boolean.toString(boolValue), true, true); + assertThat(result, instanceOf(Boolean.class)); + assertThat(result, equalTo(boolValue)); + } + { + double value = randomDouble(); + Object result = JsonProcessor.apply(Double.toString(value), true, true); + assertThat(result, instanceOf(Double.class)); + assertThat((double) result, closeTo(value, .001)); + } + { + List list = randomList(10, ESTestCase::randomDouble); + String value = list.stream().map(val -> Double.toString(val)).collect(Collectors.joining(",", "[", "]")); + Object result = JsonProcessor.apply(value, true, true); + assertThat(result, instanceOf(List.class)); + List resultList = (List) result; + assertThat(resultList.size(), equalTo(list.size())); + for (int i = 0; i < list.size(); i++) { + assertThat(resultList.get(i), closeTo(list.get(i), .001)); + } + } + } + + @SuppressWarnings({ "unchecked", "rawtypes" }) + public void testApplyWithInvalidJson() { + /* + * The following fail whether strictJsonParsing is set to true or false. The reason is that even the first token cannot be parsed + * as JSON (since the first token is not a primitive or an object -- just characters not in quotes). 
+ */ + expectThrows(IllegalArgumentException.class, () -> JsonProcessor.apply("foo", true, true)); + expectThrows(IllegalArgumentException.class, () -> JsonProcessor.apply("foo", true, false)); + expectThrows(IllegalArgumentException.class, () -> JsonProcessor.apply("foo [360113.865822] wbrdg-0afe001ce", true, true)); + expectThrows(IllegalArgumentException.class, () -> JsonProcessor.apply("foo [360113.865822] wbrdg-0afe001ce", true, false)); + + /* + * The following are examples of malformed json but the first part of each is valid json. Previously apply parsed just the first + * token and ignored the rest, but it now throws an IllegalArgumentException unless strictJsonParsing is set to false. See + * https://github.com/elastic/elasticsearch/issues/92898. + */ + expectThrows(IllegalArgumentException.class, () -> JsonProcessor.apply("123 foo", true, true)); + expectThrows(IllegalArgumentException.class, () -> JsonProcessor.apply("45 this is {\"a\": \"json\"}", true, true)); + + { + expectThrows(IllegalArgumentException.class, () -> JsonProcessor.apply("[360113.865822] wbrdg-0afe001ce", true, true)); + Object result = JsonProcessor.apply("[360113.865822] wbrdg-0afe001ce", true, false); + assertThat(result, instanceOf(List.class)); + List resultList = (List) result; + assertThat(resultList.size(), equalTo(1)); + assertThat(resultList.get(0), closeTo(360113.865822, .001)); + } + { + expectThrows(IllegalArgumentException.class, () -> JsonProcessor.apply("{\"foo\":\"bar\"} wbrdg-0afe00e", true, true)); + Object result = JsonProcessor.apply("{\"foo\":\"bar\"} wbrdg-0afe00e", true, false); + assertThat(result, instanceOf(Map.class)); + Map resultMap = (Map) result; + assertThat(resultMap.size(), equalTo(1)); + assertThat(resultMap.get("foo"), equalTo("bar")); + } + { + expectThrows(IllegalArgumentException.class, () -> JsonProcessor.apply(" 1268 : TimeOut = 123 : a", true, true)); + Object result = JsonProcessor.apply(" 1268 : TimeOut = 123 : a", true, false); + assertThat(result, instanceOf(Integer.class)); + assertThat(result, equalTo(1268)); + } + } } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/KeyValueProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/KeyValueProcessorFactoryTests.java index 3495428c9567..5d1669a33b7b 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/KeyValueProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/KeyValueProcessorFactoryTests.java @@ -15,12 +15,12 @@ import org.elasticsearch.test.ESTestCase; import org.junit.Before; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.Set; -import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -41,7 +41,7 @@ public void testCreateWithDefaults() throws Exception { String processorTag = randomAlphaOfLength(10); KeyValueProcessor processor = factory.create(null, processorTag, null, config); assertThat(processor.getTag(), equalTo(processorTag)); - assertThat(processor.getField().newInstance(Collections.emptyMap()).execute(), equalTo("field1")); + assertThat(processor.getField().newInstance(Map.of()).execute(), equalTo("field1")); assertThat(processor.getFieldSplit(), equalTo("&")); assertThat(processor.getValueSplit(), equalTo("=")); 
assertThat(processor.getIncludeKeys(), is(nullValue())); @@ -55,18 +55,18 @@ public void testCreateWithAllFieldsSet() throws Exception { config.put("field_split", "&"); config.put("value_split", "="); config.put("target_field", "target"); - config.put("include_keys", Arrays.asList("a", "b")); - config.put("exclude_keys", Collections.emptyList()); + config.put("include_keys", List.of("a", "b")); + config.put("exclude_keys", List.of()); config.put("ignore_missing", true); String processorTag = randomAlphaOfLength(10); KeyValueProcessor processor = factory.create(null, processorTag, null, config); assertThat(processor.getTag(), equalTo(processorTag)); - assertThat(processor.getField().newInstance(Collections.emptyMap()).execute(), equalTo("field1")); + assertThat(processor.getField().newInstance(Map.of()).execute(), equalTo("field1")); assertThat(processor.getFieldSplit(), equalTo("&")); assertThat(processor.getValueSplit(), equalTo("=")); assertThat(processor.getIncludeKeys(), equalTo(Sets.newHashSet("a", "b"))); - assertThat(processor.getExcludeKeys(), equalTo(Collections.emptySet())); - assertThat(processor.getTargetField().newInstance(Collections.emptyMap()).execute(), equalTo("target")); + assertThat(processor.getExcludeKeys(), equalTo(Set.of())); + assertThat(processor.getTargetField().newInstance(Map.of()).execute(), equalTo("target")); assertTrue(processor.isIgnoreMissing()); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/KeyValueProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/KeyValueProcessorTests.java index 44eed2611723..af9d7d952f33 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/KeyValueProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/KeyValueProcessorTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -36,24 +35,24 @@ public void test() throws Exception { Processor processor = createKvProcessor(fieldName, "&", "=", null, null, "target", false); processor.execute(ingestDocument); assertThat(ingestDocument.getFieldValue("target.first", String.class), equalTo("hello")); - assertThat(ingestDocument.getFieldValue("target.second", List.class), equalTo(Arrays.asList("world", "universe"))); + assertThat(ingestDocument.getFieldValue("target.second", List.class), equalTo(List.of("world", "universe"))); } public void testRootTarget() throws Exception { - IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Map.of()); ingestDocument.setFieldValue("myField", "first=hello&second=world&second=universe"); Processor processor = createKvProcessor("myField", "&", "=", null, null, null, false); processor.execute(ingestDocument); assertThat(ingestDocument.getFieldValue("first", String.class), equalTo("hello")); - assertThat(ingestDocument.getFieldValue("second", List.class), equalTo(Arrays.asList("world", "universe"))); + assertThat(ingestDocument.getFieldValue("second", List.class), equalTo(List.of("world", "universe"))); } public void testKeySameAsSourceField() throws Exception { - IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); + IngestDocument ingestDocument = 
RandomDocumentPicks.randomIngestDocument(random(), Map.of()); ingestDocument.setFieldValue("first", "first=hello"); Processor processor = createKvProcessor("first", "&", "=", null, null, null, false); processor.execute(ingestDocument); - assertThat(ingestDocument.getFieldValue("first", List.class), equalTo(Arrays.asList("first=hello", "hello"))); + assertThat(ingestDocument.getFieldValue("first", List.class), equalTo(List.of("first=hello", "hello"))); } public void testIncludeKeys() throws Exception { @@ -97,7 +96,7 @@ public void testIncludeAndExcludeKeys() throws Exception { } public void testMissingField() throws Exception { - IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Map.of()); Processor processor = createKvProcessor("unknown", "&", "=", null, null, "target", false); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); assertThat(exception.getMessage(), equalTo("field [unknown] doesn't exist")); @@ -116,7 +115,7 @@ public void testNullValueWithIgnoreMissing() throws Exception { } public void testNonExistentWithIgnoreMissing() throws Exception { - IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); + IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Map.of()); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); Processor processor = createKvProcessor("unknown", "", "", null, null, "target", true); processor.execute(ingestDocument); @@ -133,7 +132,7 @@ public void testFailFieldSplitMatch() throws Exception { } public void testFailValueSplitMatch() throws Exception { - IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.singletonMap("foo", "bar")); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Map.of("foo", "bar")); Processor processor = createKvProcessor("foo", "&", "=", null, null, "target", false); Exception exception = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); assertThat(exception.getMessage(), equalTo("field [foo] does not contain value_split [=]")); @@ -145,7 +144,7 @@ public void testTrimKeyAndValue() throws Exception { Processor processor = createKvProcessor(fieldName, "&", "=", null, null, "target", false, " ", " ", false, null); processor.execute(ingestDocument); assertThat(ingestDocument.getFieldValue("target.first", String.class), equalTo("hello")); - assertThat(ingestDocument.getFieldValue("target.second", List.class), equalTo(Arrays.asList("world", "universe"))); + assertThat(ingestDocument.getFieldValue("target.second", List.class), equalTo(List.of("world", "universe"))); } public void testTrimMultiCharSequence() throws Exception { @@ -177,7 +176,7 @@ public void testStripBrackets() throws Exception { Processor processor = createKvProcessor(fieldName, "&", "=", null, null, "target", false, null, null, true, null); processor.execute(ingestDocument); assertThat(ingestDocument.getFieldValue("target.first", String.class), equalTo("hello")); - assertThat(ingestDocument.getFieldValue("target.second", List.class), equalTo(Arrays.asList("world", "universe"))); + assertThat(ingestDocument.getFieldValue("target.second", List.class), equalTo(List.of("world", "universe"))); 
assertThat(ingestDocument.getFieldValue("target.third", String.class), equalTo("foo")); assertThat(ingestDocument.getFieldValue("target.fourth", String.class), equalTo("bar")); assertThat(ingestDocument.getFieldValue("target.fifth", String.class), equalTo("last")); @@ -189,7 +188,7 @@ public void testAddPrefix() throws Exception { Processor processor = createKvProcessor(fieldName, "&", "=", null, null, "target", false, null, null, false, "arg_"); processor.execute(ingestDocument); assertThat(ingestDocument.getFieldValue("target.arg_first", String.class), equalTo("hello")); - assertThat(ingestDocument.getFieldValue("target.arg_second", List.class), equalTo(Arrays.asList("world", "universe"))); + assertThat(ingestDocument.getFieldValue("target.arg_second", List.class), equalTo(List.of("world", "universe"))); } private static KeyValueProcessor createKvProcessor( diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/NetworkDirectionProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/NetworkDirectionProcessorFactoryTests.java index deece8362678..59787e106307 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/NetworkDirectionProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/NetworkDirectionProcessorFactoryTests.java @@ -14,7 +14,6 @@ import org.junit.Before; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -22,7 +21,7 @@ import static org.elasticsearch.ingest.common.NetworkDirectionProcessor.Factory.DEFAULT_DEST_IP; import static org.elasticsearch.ingest.common.NetworkDirectionProcessor.Factory.DEFAULT_SOURCE_IP; import static org.elasticsearch.ingest.common.NetworkDirectionProcessor.Factory.DEFAULT_TARGET; -import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; public class NetworkDirectionProcessorFactoryTests extends ESTestCase { @@ -56,7 +55,7 @@ public void testCreate() throws Exception { assertThat(networkProcessor.getDestinationIpField(), equalTo(destIpField)); assertThat(networkProcessor.getTargetField(), equalTo(targetField)); assertThat(networkProcessor.getInternalNetworks().size(), greaterThan(0)); - assertThat(networkProcessor.getInternalNetworks().get(0).newInstance(Collections.emptyMap()).execute(), equalTo("10.0.0.0/8")); + assertThat(networkProcessor.getInternalNetworks().get(0).newInstance(Map.of()).execute(), equalTo("10.0.0.0/8")); assertThat(networkProcessor.getIgnoreMissing(), equalTo(ignoreMissing)); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/NetworkDirectionProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/NetworkDirectionProcessorTests.java index 4ae543afc393..7c53df0ca3f4 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/NetworkDirectionProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/NetworkDirectionProcessorTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; -import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -183,7 +182,7 @@ private void testNetworkDirectionProcessor( ) throws Exception { List networks = null; - if (internalNetworks != null) networks = Arrays.asList(internalNetworks); + if 
(internalNetworks != null) networks = List.of(internalNetworks); String processorTag = randomAlphaOfLength(10); Map config = new HashMap<>(); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RegisteredDomainProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RegisteredDomainProcessorFactoryTests.java index f5100298c63d..5dc20dd9e07b 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RegisteredDomainProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RegisteredDomainProcessorFactoryTests.java @@ -15,7 +15,7 @@ import java.util.HashMap; import java.util.Map; -import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.equalTo; public class RegisteredDomainProcessorFactoryTests extends ESTestCase { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RemoveProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RemoveProcessorFactoryTests.java index 876f7823839f..49a9dd065184 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RemoveProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RemoveProcessorFactoryTests.java @@ -14,13 +14,11 @@ import org.elasticsearch.test.ESTestCase; import org.junit.Before; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; -import java.util.stream.Collectors; -import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.equalTo; public class RemoveProcessorFactoryTests extends ESTestCase { @@ -37,31 +35,28 @@ public void testCreate() throws Exception { String processorTag = randomAlphaOfLength(10); RemoveProcessor removeProcessor = factory.create(null, processorTag, null, config); assertThat(removeProcessor.getTag(), equalTo(processorTag)); - assertThat(removeProcessor.getFieldsToRemove().get(0).newInstance(Collections.emptyMap()).execute(), equalTo("field1")); + assertThat(removeProcessor.getFieldsToRemove().get(0).newInstance(Map.of()).execute(), equalTo("field1")); } public void testCreateKeepField() throws Exception { Map config = new HashMap<>(); - config.put("keep", Arrays.asList("field1", "field2")); + config.put("keep", List.of("field1", "field2")); String processorTag = randomAlphaOfLength(10); RemoveProcessor removeProcessor = factory.create(null, processorTag, null, config); assertThat(removeProcessor.getTag(), equalTo(processorTag)); - assertThat(removeProcessor.getFieldsToKeep().get(0).newInstance(Collections.emptyMap()).execute(), equalTo("field1")); - assertThat(removeProcessor.getFieldsToKeep().get(1).newInstance(Collections.emptyMap()).execute(), equalTo("field2")); + assertThat(removeProcessor.getFieldsToKeep().get(0).newInstance(Map.of()).execute(), equalTo("field1")); + assertThat(removeProcessor.getFieldsToKeep().get(1).newInstance(Map.of()).execute(), equalTo("field2")); } public void testCreateMultipleFields() throws Exception { Map config = new HashMap<>(); - config.put("field", Arrays.asList("field1", "field2")); + config.put("field", List.of("field1", "field2")); String processorTag = randomAlphaOfLength(10); RemoveProcessor removeProcessor = factory.create(null, processorTag, null, config); assertThat(removeProcessor.getTag(), equalTo(processorTag)); assertThat( - removeProcessor.getFieldsToRemove() - .stream() - 
.map(template -> template.newInstance(Collections.emptyMap()).execute()) - .collect(Collectors.toList()), - equalTo(Arrays.asList("field1", "field2")) + removeProcessor.getFieldsToRemove().stream().map(template -> template.newInstance(Map.of()).execute()).toList(), + equalTo(List.of("field1", "field2")) ); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RemoveProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RemoveProcessorTests.java index f17f019dc36c..5bda0972401e 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RemoveProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RemoveProcessorTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -32,8 +31,8 @@ public void testRemoveFields() throws Exception { Processor processor = new RemoveProcessor( randomAlphaOfLength(10), null, - Collections.singletonList(new TestTemplateService.MockTemplateScript.Factory(field)), - Collections.emptyList(), + List.of(new TestTemplateService.MockTemplateScript.Factory(field)), + List.of(), false ); processor.execute(ingestDocument); @@ -107,34 +106,16 @@ public void testShouldKeep(String a, String b) { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), source); - assertTrue( - RemoveProcessor.shouldKeep( - "name", - Collections.singletonList(new TestTemplateService.MockTemplateScript.Factory("name")), - ingestDocument - ) - ); + assertTrue(RemoveProcessor.shouldKeep("name", List.of(new TestTemplateService.MockTemplateScript.Factory("name")), ingestDocument)); - assertTrue( - RemoveProcessor.shouldKeep( - "age", - Collections.singletonList(new TestTemplateService.MockTemplateScript.Factory("age")), - ingestDocument - ) - ); + assertTrue(RemoveProcessor.shouldKeep("age", List.of(new TestTemplateService.MockTemplateScript.Factory("age")), ingestDocument)); - assertFalse( - RemoveProcessor.shouldKeep( - "name", - Collections.singletonList(new TestTemplateService.MockTemplateScript.Factory("age")), - ingestDocument - ) - ); + assertFalse(RemoveProcessor.shouldKeep("name", List.of(new TestTemplateService.MockTemplateScript.Factory("age")), ingestDocument)); assertTrue( RemoveProcessor.shouldKeep( "address", - Collections.singletonList(new TestTemplateService.MockTemplateScript.Factory("address.street")), + List.of(new TestTemplateService.MockTemplateScript.Factory("address.street")), ingestDocument ) ); @@ -142,7 +123,7 @@ public void testShouldKeep(String a, String b) { assertTrue( RemoveProcessor.shouldKeep( "address", - Collections.singletonList(new TestTemplateService.MockTemplateScript.Factory("address.number")), + List.of(new TestTemplateService.MockTemplateScript.Factory("address.number")), ingestDocument ) ); @@ -150,7 +131,7 @@ public void testShouldKeep(String a, String b) { assertTrue( RemoveProcessor.shouldKeep( "address.street", - Collections.singletonList(new TestTemplateService.MockTemplateScript.Factory("address")), + List.of(new TestTemplateService.MockTemplateScript.Factory("address")), ingestDocument ) ); @@ -158,23 +139,19 @@ public void testShouldKeep(String a, String b) { assertTrue( RemoveProcessor.shouldKeep( "address.number", - Collections.singletonList(new TestTemplateService.MockTemplateScript.Factory("address")), + List.of(new 
TestTemplateService.MockTemplateScript.Factory("address")), ingestDocument ) ); assertTrue( - RemoveProcessor.shouldKeep( - "address", - Collections.singletonList(new TestTemplateService.MockTemplateScript.Factory("address")), - ingestDocument - ) + RemoveProcessor.shouldKeep("address", List.of(new TestTemplateService.MockTemplateScript.Factory("address")), ingestDocument) ); assertFalse( RemoveProcessor.shouldKeep( "address.street", - Collections.singletonList(new TestTemplateService.MockTemplateScript.Factory("address.number")), + List.of(new TestTemplateService.MockTemplateScript.Factory("address.number")), ingestDocument ) ); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorFactoryTests.java index ad1f6f0962de..2299081eb22c 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorFactoryTests.java @@ -13,11 +13,10 @@ import org.elasticsearch.test.ESTestCase; import org.junit.Before; -import java.util.Collections; import java.util.HashMap; import java.util.Map; -import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.equalTo; public class RenameProcessorFactoryTests extends ESTestCase { @@ -35,8 +34,8 @@ public void testCreate() throws Exception { String processorTag = randomAlphaOfLength(10); RenameProcessor renameProcessor = factory.create(null, processorTag, null, config); assertThat(renameProcessor.getTag(), equalTo(processorTag)); - assertThat(renameProcessor.getField().newInstance(Collections.emptyMap()).execute(), equalTo("old_field")); - assertThat(renameProcessor.getTargetField().newInstance(Collections.emptyMap()).execute(), equalTo("new_field")); + assertThat(renameProcessor.getField().newInstance(Map.of()).execute(), equalTo("old_field")); + assertThat(renameProcessor.getTargetField().newInstance(Map.of()).execute(), equalTo("new_field")); assertThat(renameProcessor.isIgnoreMissing(), equalTo(false)); } @@ -48,8 +47,8 @@ public void testCreateWithIgnoreMissing() throws Exception { String processorTag = randomAlphaOfLength(10); RenameProcessor renameProcessor = factory.create(null, processorTag, null, config); assertThat(renameProcessor.getTag(), equalTo(processorTag)); - assertThat(renameProcessor.getField().newInstance(Collections.emptyMap()).execute(), equalTo("old_field")); - assertThat(renameProcessor.getTargetField().newInstance(Collections.emptyMap()).execute(), equalTo("new_field")); + assertThat(renameProcessor.getField().newInstance(Map.of()).execute(), equalTo("old_field")); + assertThat(renameProcessor.getTargetField().newInstance(Map.of()).execute(), equalTo("new_field")); assertThat(renameProcessor.isIgnoreMissing(), equalTo(true)); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java index 5908fc8784d8..1d10c3090990 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import 
java.util.Map; @@ -50,8 +49,8 @@ public void testRenameArrayElement() throws Exception { list.add("item3"); document.put("list", list); List> one = new ArrayList<>(); - one.add(Collections.singletonMap("one", "one")); - one.add(Collections.singletonMap("two", "two")); + one.add(Map.of("one", "one")); + one.add(Map.of("two", "two")); document.put("one", one); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); @@ -139,7 +138,7 @@ public void testRenameExistingFieldNullValue() throws Exception { public void testRenameAtomicOperationSetFails() throws Exception { Map metadata = new HashMap<>(); - metadata.put("list", Collections.singletonList("item")); + metadata.put("list", List.of("item")); IngestDocument ingestDocument = TestIngestDocument.ofMetadataWithValidator( metadata, @@ -162,7 +161,7 @@ public void testRenameAtomicOperationSetFails() throws Exception { public void testRenameAtomicOperationRemoveFails() throws Exception { Map metadata = new HashMap<>(); - metadata.put("list", Collections.singletonList("item")); + metadata.put("list", List.of("item")); IngestDocument ingestDocument = TestIngestDocument.ofMetadataWithValidator( metadata, @@ -185,16 +184,13 @@ public void testRenameLeafIntoBranch() throws Exception { IngestDocument ingestDocument = TestIngestDocument.withDefaultVersion(source); Processor processor1 = createRenameProcessor("foo", "foo.bar", false); processor1.execute(ingestDocument); - assertThat(ingestDocument.getFieldValue("foo", Map.class), equalTo(Collections.singletonMap("bar", "bar"))); + assertThat(ingestDocument.getFieldValue("foo", Map.class), equalTo(Map.of("bar", "bar"))); assertThat(ingestDocument.getFieldValue("foo.bar", String.class), equalTo("bar")); Processor processor2 = createRenameProcessor("foo.bar", "foo.bar.baz", false); processor2.execute(ingestDocument); - assertThat( - ingestDocument.getFieldValue("foo", Map.class), - equalTo(Collections.singletonMap("bar", Collections.singletonMap("baz", "bar"))) - ); - assertThat(ingestDocument.getFieldValue("foo.bar", Map.class), equalTo(Collections.singletonMap("baz", "bar"))); + assertThat(ingestDocument.getFieldValue("foo", Map.class), equalTo(Map.of("bar", Map.of("baz", "bar")))); + assertThat(ingestDocument.getFieldValue("foo.bar", Map.class), equalTo(Map.of("baz", "bar"))); assertThat(ingestDocument.getFieldValue("foo.bar.baz", String.class), equalTo("bar")); // for fun lets try to restore it (which don't allow today) diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java index 4a8a01218b52..b3e4a870177b 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorFactoryTests.java @@ -23,14 +23,14 @@ import org.elasticsearch.xcontent.XContentParseException; import org.junit.Before; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -55,7 +55,7 @@ public void testFactoryValidationWithDefaultLang() throws 
Exception { ScriptProcessor processor = factory.create(null, randomAlphaOfLength(10), null, configMap); assertThat(processor.getScript().getLang(), equalTo(randomType.equals("id") ? null : Script.DEFAULT_SCRIPT_LANG)); assertThat(processor.getScript().getType().toString(), equalTo(INGEST_SCRIPT_PARAM_TO_TYPE.get(randomType))); - assertThat(processor.getScript().getParams(), equalTo(Collections.emptyMap())); + assertThat(processor.getScript().getParams(), equalTo(Map.of())); } public void testFactoryValidationWithParams() throws Exception { @@ -65,7 +65,7 @@ public void testFactoryValidationWithParams() throws Exception { Map configMap = new HashMap<>(); String randomType = randomFrom("id", "source"); - Map randomParams = Collections.singletonMap(randomAlphaOfLength(10), randomAlphaOfLength(10)); + Map randomParams = Map.of(randomAlphaOfLength(10), randomAlphaOfLength(10)); configMap.put(randomType, "foo"); configMap.put("params", randomParams); ScriptProcessor processor = factory.create(null, randomAlphaOfLength(10), null, configMap); @@ -117,7 +117,7 @@ public void testFactoryInvalidateWithInvalidCompiledScript() throws Exception { ScriptException thrownException = new ScriptException( "compile-time exception", new RuntimeException(), - Collections.emptyList(), + List.of(), "script", "mockscript" ); @@ -139,13 +139,10 @@ public void testInlineIsCompiled() throws Exception { String scriptName = "foo"; ScriptService scriptService = new ScriptService( Settings.builder().build(), - Collections.singletonMap( - Script.DEFAULT_SCRIPT_LANG, - new MockScriptEngine(Script.DEFAULT_SCRIPT_LANG, Collections.singletonMap(scriptName, ctx -> { - ctx.put("foo", "bar"); - return null; - }), Collections.emptyMap()) - ), + Map.of(Script.DEFAULT_SCRIPT_LANG, new MockScriptEngine(Script.DEFAULT_SCRIPT_LANG, Map.of(scriptName, ctx -> { + ctx.put("foo", "bar"); + return null; + }), Map.of())), new HashMap<>(ScriptModule.CORE_CONTEXTS), () -> 1L ); @@ -156,7 +153,7 @@ public void testInlineIsCompiled() throws Exception { ScriptProcessor processor = factory.create(null, null, randomAlphaOfLength(10), configMap); assertThat(processor.getScript().getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG)); assertThat(processor.getScript().getType(), equalTo(ScriptType.INLINE)); - assertThat(processor.getScript().getParams(), equalTo(Collections.emptyMap())); + assertThat(processor.getScript().getParams(), equalTo(Map.of())); assertNotNull(processor.getPrecompiledIngestScriptFactory()); CtxMap ctx = TestIngestDocument.emptyIngestDocument().getCtxMap(); processor.getPrecompiledIngestScriptFactory().newInstance(null, ctx).execute(); @@ -172,7 +169,7 @@ public void testStoredIsNotCompiled() throws Exception { ScriptProcessor processor = factory.create(null, null, randomAlphaOfLength(10), configMap); assertNull(processor.getScript().getLang()); assertThat(processor.getScript().getType(), equalTo(ScriptType.STORED)); - assertThat(processor.getScript().getParams(), equalTo(Collections.emptyMap())); + assertThat(processor.getScript().getParams(), equalTo(Map.of())); assertNull(processor.getPrecompiledIngestScriptFactory()); } } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorTests.java index 972ca029b7b0..91dd0c861c02 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorTests.java +++ 
b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorTests.java @@ -20,13 +20,12 @@ import org.elasticsearch.test.ESTestCase; import org.junit.Before; -import java.util.Collections; import java.util.HashMap; import java.util.Map; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; -import static org.hamcrest.core.Is.is; +import static org.hamcrest.Matchers.is; public class ScriptProcessorTests extends ESTestCase { @@ -39,20 +38,17 @@ public void setupScripting() { String scriptName = "script"; scriptService = new ScriptService( Settings.builder().build(), - Collections.singletonMap( - Script.DEFAULT_SCRIPT_LANG, - new MockScriptEngine(Script.DEFAULT_SCRIPT_LANG, Collections.singletonMap(scriptName, ctx -> { - Integer bytesIn = (Integer) ctx.get("bytes_in"); - Integer bytesOut = (Integer) ctx.get("bytes_out"); - ctx.put("bytes_total", bytesIn + bytesOut); - ctx.put("_dynamic_templates", Map.of("foo", "bar")); - return null; - }), Collections.emptyMap()) - ), + Map.of(Script.DEFAULT_SCRIPT_LANG, new MockScriptEngine(Script.DEFAULT_SCRIPT_LANG, Map.of(scriptName, ctx -> { + Integer bytesIn = (Integer) ctx.get("bytes_in"); + Integer bytesOut = (Integer) ctx.get("bytes_out"); + ctx.put("bytes_total", bytesIn + bytesOut); + ctx.put("_dynamic_templates", Map.of("foo", "bar")); + return null; + }), Map.of())), new HashMap<>(ScriptModule.CORE_CONTEXTS), () -> 1L ); - script = new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, scriptName, Collections.emptyMap()); + script = new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, scriptName, Map.of()); ingestScriptFactory = scriptService.compile(script, IngestScript.CONTEXT); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorFactoryTests.java index ac1af5f3dcdd..085cc68fe363 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorFactoryTests.java @@ -15,13 +15,12 @@ import org.elasticsearch.test.ESTestCase; import org.junit.Before; -import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; -import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; public class SetProcessorFactoryTests extends ESTestCase { @@ -39,8 +38,8 @@ public void testCreate() throws Exception { String processorTag = randomAlphaOfLength(10); SetProcessor setProcessor = factory.create(null, processorTag, null, config); assertThat(setProcessor.getTag(), equalTo(processorTag)); - assertThat(setProcessor.getField().newInstance(Collections.emptyMap()).execute(), equalTo("field1")); - assertThat(setProcessor.getValue().copyAndResolve(Collections.emptyMap()), equalTo("value1")); + assertThat(setProcessor.getField().newInstance(Map.of()).execute(), equalTo("field1")); + assertThat(setProcessor.getValue().copyAndResolve(Map.of()), equalTo("value1")); assertThat(setProcessor.isOverrideEnabled(), equalTo(true)); } @@ -53,8 +52,8 @@ public void testCreateWithOverride() throws Exception { String processorTag = randomAlphaOfLength(10); SetProcessor setProcessor = factory.create(null, processorTag, null, config); assertThat(setProcessor.getTag(), equalTo(processorTag)); - 
assertThat(setProcessor.getField().newInstance(Collections.emptyMap()).execute(), equalTo("field1")); - assertThat(setProcessor.getValue().copyAndResolve(Collections.emptyMap()), equalTo("value1")); + assertThat(setProcessor.getField().newInstance(Map.of()).execute(), equalTo("field1")); + assertThat(setProcessor.getValue().copyAndResolve(Map.of()), equalTo("value1")); assertThat(setProcessor.isOverrideEnabled(), equalTo(overrideEnabled)); } @@ -113,7 +112,7 @@ public void testCreateWithCopyFrom() throws Exception { String processorTag = randomAlphaOfLength(10); SetProcessor setProcessor = factory.create(null, processorTag, null, config); assertThat(setProcessor.getTag(), equalTo(processorTag)); - assertThat(setProcessor.getField().newInstance(Collections.emptyMap()).execute(), equalTo("field1")); + assertThat(setProcessor.getField().newInstance(Map.of()).execute(), equalTo("field1")); assertThat(setProcessor.getCopyFrom(), equalTo("field2")); } @@ -143,7 +142,7 @@ public void testMediaType() throws Exception { // invalid media type expectedMediaType = randomValueOtherThanMany( - m -> Arrays.asList(ConfigurationUtils.VALID_MEDIA_TYPES).contains(m), + m -> List.of(ConfigurationUtils.VALID_MEDIA_TYPES).contains(m), () -> randomAlphaOfLengthBetween(5, 9) ); final Map config2 = new HashMap<>(); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorTests.java index e6477b8940a8..5973e4fe5741 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.ingest.TestTemplateService; import org.elasticsearch.ingest.ValueSource; import org.elasticsearch.test.ESTestCase; -import org.hamcrest.Matchers; import java.util.ArrayList; import java.util.Date; @@ -109,7 +108,7 @@ public void testSetMetadataExceptVersion() throws Exception { Processor processor = createSetProcessor(randomMetadata.getFieldName(), "_value", null, true, false); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); processor.execute(ingestDocument); - assertThat(ingestDocument.getFieldValue(randomMetadata.getFieldName(), String.class), Matchers.equalTo("_value")); + assertThat(ingestDocument.getFieldValue(randomMetadata.getFieldName(), String.class), equalTo("_value")); } public void testSetMetadataVersion() throws Exception { @@ -117,7 +116,7 @@ public void testSetMetadataVersion() throws Exception { Processor processor = createSetProcessor(Metadata.VERSION.getFieldName(), version, null, true, false); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); processor.execute(ingestDocument); - assertThat(ingestDocument.getFieldValue(Metadata.VERSION.getFieldName(), Long.class), Matchers.equalTo(version)); + assertThat(ingestDocument.getFieldValue(Metadata.VERSION.getFieldName(), Long.class), equalTo(version)); } public void testSetMetadataVersionType() throws Exception { @@ -125,7 +124,7 @@ public void testSetMetadataVersionType() throws Exception { Processor processor = createSetProcessor(Metadata.VERSION_TYPE.getFieldName(), versionType, null, true, false); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); processor.execute(ingestDocument); - assertThat(ingestDocument.getFieldValue(Metadata.VERSION_TYPE.getFieldName(), 
String.class), Matchers.equalTo(versionType)); + assertThat(ingestDocument.getFieldValue(Metadata.VERSION_TYPE.getFieldName(), String.class), equalTo(versionType)); } public void testSetMetadataIfSeqNo() throws Exception { @@ -133,7 +132,7 @@ public void testSetMetadataIfSeqNo() throws Exception { Processor processor = createSetProcessor(Metadata.IF_SEQ_NO.getFieldName(), ifSeqNo, null, true, false); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); processor.execute(ingestDocument); - assertThat(ingestDocument.getFieldValue(Metadata.IF_SEQ_NO.getFieldName(), Long.class), Matchers.equalTo(ifSeqNo)); + assertThat(ingestDocument.getFieldValue(Metadata.IF_SEQ_NO.getFieldName(), Long.class), equalTo(ifSeqNo)); } public void testSetMetadataIfPrimaryTerm() throws Exception { @@ -141,7 +140,7 @@ public void testSetMetadataIfPrimaryTerm() throws Exception { Processor processor = createSetProcessor(Metadata.IF_PRIMARY_TERM.getFieldName(), ifPrimaryTerm, null, true, false); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); processor.execute(ingestDocument); - assertThat(ingestDocument.getFieldValue(Metadata.IF_PRIMARY_TERM.getFieldName(), Long.class), Matchers.equalTo(ifPrimaryTerm)); + assertThat(ingestDocument.getFieldValue(Metadata.IF_PRIMARY_TERM.getFieldName(), Long.class), equalTo(ifPrimaryTerm)); } public void testSetDynamicTemplates() throws Exception { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SortProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SortProcessorFactoryTests.java index 51abbf305837..0aeae9e6eed4 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SortProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SortProcessorFactoryTests.java @@ -15,7 +15,7 @@ import java.util.HashMap; import java.util.Map; -import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.equalTo; public class SortProcessorFactoryTests extends ESTestCase { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SortProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SortProcessorTests.java index 7d5a64216654..f638f8c12315 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SortProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SortProcessorTests.java @@ -15,10 +15,10 @@ import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.Map; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -53,7 +53,7 @@ public void testSortIntegersNonRandom() throws Exception { Integer[] expectedResult = new Integer[] { 1, 2, 3, 4, 5, 10, 20, 21, 22, 50, 100 }; List fieldValue = new ArrayList<>(expectedResult.length); - fieldValue.addAll(Arrays.asList(expectedResult).subList(0, expectedResult.length)); + fieldValue.addAll(List.of(expectedResult).subList(0, expectedResult.length)); Collections.shuffle(fieldValue, random()); String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue); @@ -265,7 +265,7 @@ public void testSortNullValue() throws Exception { } public void testDescendingSortWithTargetField() throws Exception { - 
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Map.of()); int numItems = randomIntBetween(1, 10); List fieldValue = new ArrayList<>(numItems); List expectedResult = new ArrayList<>(numItems); @@ -285,7 +285,7 @@ public void testDescendingSortWithTargetField() throws Exception { } public void testAscendingSortWithTargetField() throws Exception { - IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Map.of()); int numItems = randomIntBetween(1, 10); List fieldValue = new ArrayList<>(numItems); List expectedResult = new ArrayList<>(numItems); @@ -305,8 +305,8 @@ public void testAscendingSortWithTargetField() throws Exception { } public void testSortWithTargetFieldLeavesOriginalUntouched() throws Exception { - IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); - List fieldValue = Arrays.asList(1, 5, 4); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Map.of()); + List fieldValue = List.of(1, 5, 4); List expectedResult = new ArrayList<>(fieldValue); Collections.sort(expectedResult); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SplitProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SplitProcessorFactoryTests.java index 891f67bd45a6..10d96b8add96 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SplitProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SplitProcessorFactoryTests.java @@ -14,7 +14,7 @@ import java.util.HashMap; import java.util.Map; -import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.equalTo; public class SplitProcessorFactoryTests extends ESTestCase { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SplitProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SplitProcessorTests.java index 73c94efdb985..500debd79b96 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SplitProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SplitProcessorTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.ingest.TestIngestDocument; import org.elasticsearch.test.ESTestCase; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -31,7 +30,7 @@ public void testSplit() throws Exception { String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, "127.0.0.1"); Processor processor = new SplitProcessor(randomAlphaOfLength(10), null, fieldName, "\\.", false, false, fieldName); processor.execute(ingestDocument); - assertThat(ingestDocument.getFieldValue(fieldName, List.class), equalTo(Arrays.asList("127", "0", "0", "1"))); + assertThat(ingestDocument.getFieldValue(fieldName, List.class), equalTo(List.of("127", "0", "0", "1"))); } public void testSplitFieldNotFound() throws Exception { @@ -70,7 +69,7 @@ public void testSplitNullValueWithIgnoreMissing() throws Exception { } public void testSplitNonExistentWithIgnoreMissing() throws Exception { - IngestDocument originalIngestDocument = 
RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); + IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Map.of()); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); Processor processor = new SplitProcessor(randomAlphaOfLength(10), null, "field", "\\.", true, false, "field"); processor.execute(ingestDocument); @@ -104,11 +103,11 @@ public void testSplitAppendable() throws Exception { splitProcessor.execute(ingestDocument); @SuppressWarnings("unchecked") List flags = (List) ingestDocument.getFieldValue("flags", List.class); - assertThat(flags, equalTo(Arrays.asList("new", "hot", "super", "fun", "interesting"))); + assertThat(flags, equalTo(List.of("new", "hot", "super", "fun", "interesting"))); ingestDocument.appendFieldValue("flags", "additional_flag"); assertThat( ingestDocument.getFieldValue("flags", List.class), - equalTo(Arrays.asList("new", "hot", "super", "fun", "interesting", "additional_flag")) + equalTo(List.of("new", "hot", "super", "fun", "interesting", "additional_flag")) ); } @@ -118,15 +117,15 @@ public void testSplitWithTargetField() throws Exception { String targetFieldName = fieldName + randomAlphaOfLength(5); Processor processor = new SplitProcessor(randomAlphaOfLength(10), null, fieldName, "\\.", false, false, targetFieldName); processor.execute(ingestDocument); - assertThat(ingestDocument.getFieldValue(targetFieldName, List.class), equalTo(Arrays.asList("127", "0", "0", "1"))); + assertThat(ingestDocument.getFieldValue(targetFieldName, List.class), equalTo(List.of("127", "0", "0", "1"))); } public void testSplitWithPreserveTrailing() throws Exception { - doTestSplitWithPreserveTrailing(true, "foo|bar|baz||", Arrays.asList("foo", "bar", "baz", "", "")); + doTestSplitWithPreserveTrailing(true, "foo|bar|baz||", List.of("foo", "bar", "baz", "", "")); } public void testSplitWithoutPreserveTrailing() throws Exception { - doTestSplitWithPreserveTrailing(false, "foo|bar|baz||", Arrays.asList("foo", "bar", "baz")); + doTestSplitWithPreserveTrailing(false, "foo|bar|baz||", List.of("foo", "bar", "baz")); } private void doTestSplitWithPreserveTrailing(boolean preserveTrailing, String fieldValue, List expected) throws Exception { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UriPartsProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UriPartsProcessorFactoryTests.java index 08e99326330a..a6ba0a32b7bb 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UriPartsProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UriPartsProcessorFactoryTests.java @@ -15,7 +15,7 @@ import java.util.HashMap; import java.util.Map; -import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.equalTo; public class UriPartsProcessorFactoryTests extends ESTestCase { diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/140_json.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/140_json.yml index 60208a52aba3..9ef22ff2ebef 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/140_json.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/140_json.yml @@ -12,6 +12,10 @@ teardown: ingest.delete_pipeline: id: "3" ignore: 404 + - do: + ingest.delete_pipeline: + id: "4" + ignore: 404 --- "Test JSON Processor": @@ 
-150,3 +154,35 @@ teardown: id: "3" - match: { _source.foo.bar: "baz" } - match: { _source.foo.qux: "quux" } + +--- +"Test JSON Processor lenient parsing": + - do: + ingest.put_pipeline: + id: "4" + body: { + "processors": [ + { + "json" : { + "field" : "message", + "strict_json_parsing": false + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: "4" + pipeline: "4" + body: { + message: "123 foo" + } + + - do: + get: + index: test + id: "4" + - match: { _source.message: 123 } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml index 614b329c699f..d833e0111f83 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml @@ -313,7 +313,7 @@ teardown: "foo": "bar" } } - - match: { error.root_cause.0.reason: "Failed to generate the source document for ingest pipeline [my_pipeline]" } + - match: { error.root_cause.0.reason: "Failed to generate the source document for ingest pipeline [my_pipeline] for document [test/1]" } --- "Test metadata": diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/240_required_pipeline.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/240_final_pipeline.yml similarity index 100% rename from modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/240_required_pipeline.yml rename to modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/240_final_pipeline.yml diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/270_set_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/270_set_processor.yml index 61fc876d8180..594ff52c2b27 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/270_set_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/270_set_processor.yml @@ -177,3 +177,53 @@ teardown: - match: { _source.copied_foo_number: 3 } - is_true: _source.copied_foo_boolean - match: { _source.foo_nochange: "no change" } + +--- +"Test set processor with reflection attempts": + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors" : [ + { + "script": { + "description": "Set a reference to a proper java object so we can attempt reflection", + "lang": "painless", + "source": "ctx.t = metadata().now" + } + }, + { + "set": { + "description": "Attempting to call a method (ZonedDateTime#getHour()) is ignored", + "field": "method_call_is_ignored", + "value": "{{t.hour}}" + } + }, + { + "set": { + "description": "Attempting to call a method that doesn't exist is ignored", + "field": "missing_method_is_ignored", + "value": "{{t.nothing}}" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: "1" + pipeline: "1" + body: { + foo: "hello" + } + + - do: + get: + index: test + id: "1" + - match: { _source.foo: "hello" } + - match: { _source.method_call_is_ignored: "" } + - match: { _source.missing_method_is_ignored: "" } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml index d7738987de18..c71e13429f72 
100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml @@ -145,6 +145,45 @@ teardown: - is_false: _source.field1 - match: {_source.field2: value2} +--- +"Test bulk request with _none request pipeline and default pipeline": + + - do: + bulk: + pipeline: pipeline1 + body: + - index: + _index: test_index + _id: test_id1 + - f1: v1 + - index: + _index: test_index + _id: test_id2 + pipeline: _none + - f1: v2 + - gte: { ingest_took: 0 } + + - do: + cluster.state: {} + # Get master node id + - set: { master_node: master } + + - do: + get: + index: test_index + id: test_id1 + + - match: {_source.field1: value1} + - is_false: _source.field2 + + - do: + get: + index: test_index + id: test_id2 + + - is_false: _source.field1 + - is_false: _source.field2 + --- "Update with pipeline": - skip: diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index 86f59753e4f2..62a78e4ef033 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -8,8 +8,8 @@ import org.apache.tools.ant.taskdefs.condition.Os -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { @@ -27,12 +27,12 @@ tasks.named('internalClusterTestTestingConventions').configure { } dependencies { - implementation('com.maxmind.geoip2:geoip2:3.0.0') + implementation('com.maxmind.geoip2:geoip2:4.0.0') // geoip2 dependencies: runtimeOnly("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}") runtimeOnly("com.fasterxml.jackson.core:jackson-databind:${versions.jackson}") runtimeOnly("com.fasterxml.jackson.core:jackson-core:${versions.jackson}") - implementation('com.maxmind.db:maxmind-db:2.0.0') + implementation('com.maxmind.db:maxmind-db:3.0.0') testImplementation 'org.elasticsearch:geolite2-databases:20191119' internalClusterTestImplementation project(path: ":modules:reindex") diff --git a/modules/ingest-geoip/qa/file-based-update/build.gradle b/modules/ingest-geoip/qa/file-based-update/build.gradle index f465172a55b7..0e396d62eb16 100644 --- a/modules/ingest-geoip/qa/file-based-update/build.gradle +++ b/modules/ingest-geoip/qa/file-based-update/build.gradle @@ -6,7 +6,7 @@ * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.legacy-java-rest-test' testClusters.configureEach { testDistribution = 'DEFAULT' diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java index 0e164cab818b..63c14ac4df96 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java @@ -27,10 +27,13 @@ import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.IngestDocument; +import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.geoip.stats.GeoIpDownloaderStatsAction; import org.elasticsearch.persistent.PersistentTaskParams; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.reindex.ReindexPlugin; import org.elasticsearch.search.SearchHit; @@ -51,11 +54,13 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; import java.util.stream.Collectors; import java.util.stream.Stream; import java.util.stream.StreamSupport; @@ -82,7 +87,12 @@ public class GeoIpDownloaderIT extends AbstractGeoIpIT { @Override protected Collection> nodePlugins() { - return Arrays.asList(ReindexPlugin.class, IngestGeoIpPlugin.class, GeoIpProcessorNonIngestNodeIT.IngestGeoIpSettingsPlugin.class); + return Arrays.asList( + ReindexPlugin.class, + IngestGeoIpPlugin.class, + GeoIpProcessorNonIngestNodeIT.IngestGeoIpSettingsPlugin.class, + NonGeoProcessorsPlugin.class + ); } @Override @@ -104,7 +114,7 @@ public void cleanUp() throws Exception { .setPersistentSettings( Settings.builder() .putNull(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey()) - .putNull(GeoIpDownloader.POLL_INTERVAL_SETTING.getKey()) + .putNull(GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getKey()) .putNull("ingest.geoip.database_validity") ) .get(); @@ -149,6 +159,7 @@ public void cleanUp() throws Exception { @TestLogging(value = "org.elasticsearch.ingest.geoip:TRACE", reason = "https://github.com/elastic/elasticsearch/issues/75221") public void testInvalidTimestamp() throws Exception { assumeTrue("only test with fixture to have stable results", ENDPOINT != null); + putGeoIpPipeline(); ClusterUpdateSettingsResponse settingsResponse = client().admin() .cluster() .prepareUpdateSettings() @@ -160,7 +171,7 @@ public void testInvalidTimestamp() throws Exception { assertEquals(Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb"), state.getDatabases().keySet()); }, 2, TimeUnit.MINUTES); - putPipeline(); + putGeoIpPipeline(); verifyUpdatedDatabase(); settingsResponse = client().admin() @@ -172,7 +183,9 @@ public void testInvalidTimestamp() throws Exception { settingsResponse = client().admin() .cluster() .prepareUpdateSettings() - 
.setPersistentSettings(Settings.builder().put(GeoIpDownloader.POLL_INTERVAL_SETTING.getKey(), TimeValue.timeValueDays(2))) + .setPersistentSettings( + Settings.builder().put(GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getKey(), TimeValue.timeValueDays(2)) + ) .get(); assertTrue(settingsResponse.isAcknowledged()); List geoIpTmpDirs = getGeoIpTmpDirs(); @@ -186,7 +199,7 @@ public void testInvalidTimestamp() throws Exception { } } }); - putPipeline(); + putGeoIpPipeline(); assertBusy(() -> { SimulateDocumentBaseResult result = simulatePipeline(); assertThat(result.getFailure(), nullValue()); @@ -221,7 +234,9 @@ public void testUpdatedTimestamp() throws Exception { ClusterUpdateSettingsResponse settingsResponse = client().admin() .cluster() .prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put(GeoIpDownloader.POLL_INTERVAL_SETTING.getKey(), TimeValue.timeValueDays(2))) + .setPersistentSettings( + Settings.builder().put(GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getKey(), TimeValue.timeValueDays(2)) + ) .get(); assertTrue(settingsResponse.isAcknowledged()); assertBusy(() -> assertNotEquals(lastCheck, getGeoIpTaskState().getDatabases().get("GeoLite2-ASN.mmdb").lastCheck())); @@ -229,6 +244,7 @@ public void testUpdatedTimestamp() throws Exception { } public void testGeoIpDatabasesDownload() throws Exception { + putGeoIpPipeline(); ClusterUpdateSettingsResponse settingsResponse = client().admin() .cluster() .prepareUpdateSettings() @@ -238,6 +254,7 @@ public void testGeoIpDatabasesDownload() throws Exception { assertBusy(() -> { GeoIpTaskState state = getGeoIpTaskState(); assertEquals(Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb"), state.getDatabases().keySet()); + putGeoIpPipeline(); // This is to work around the race condition described in #92888 }, 2, TimeUnit.MINUTES); for (String id : List.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb")) { @@ -283,12 +300,39 @@ public void testGeoIpDatabasesDownload() throws Exception { } } + public void testGeoIpDatabasesDownloadNoGeoipProcessors() throws Exception { + assumeTrue("only test with fixture to have stable results", ENDPOINT != null); + String pipelineId = randomAlphaOfLength(10); + putGeoIpPipeline(pipelineId); + ClusterUpdateSettingsResponse settingsResponse = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey(), true)) + .get(); + assertTrue(settingsResponse.isAcknowledged()); + assertBusy(() -> { + PersistentTasksCustomMetadata.PersistentTask task = getTask(); + assertNotNull(task); + assertNull(task.getState()); + putGeoIpPipeline(); // This is to work around the race condition described in #92888 + }); + putNonGeoipPipeline(pipelineId); + assertBusy(() -> { assertNull(getTask().getState()); }); + putNonGeoipPipeline(pipelineId); + assertNull(getTask().getState()); + putGeoIpPipeline(); + assertBusy(() -> { + GeoIpTaskState state = getGeoIpTaskState(); + assertEquals(Set.of("GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb"), state.getDatabases().keySet()); + }, 2, TimeUnit.MINUTES); + } + @TestLogging(value = "org.elasticsearch.ingest.geoip:TRACE", reason = "https://github.com/elastic/elasticsearch/issues/69972") public void testUseGeoIpProcessorWithDownloadedDBs() throws Exception { assumeTrue("only test with fixture to have stable results", ENDPOINT != null); setupDatabasesInConfigDirectory(); // setup: - putPipeline(); + 
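putPipeline(); +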
putGeoIpPipeline(); // verify before updating dbs { @@ -355,7 +399,7 @@ public void testUseGeoIpProcessorWithDownloadedDBs() throws Exception { @TestLogging(value = "org.elasticsearch.ingest.geoip:TRACE", reason = "https://github.com/elastic/elasticsearch/issues/79074") public void testStartWithNoDatabases() throws Exception { assumeTrue("only test with fixture to have stable results", ENDPOINT != null); - putPipeline(); + putGeoIpPipeline(); // Behaviour without any databases loaded: { @@ -438,7 +482,21 @@ private SimulateDocumentBaseResult simulatePipeline() throws IOException { return (SimulateDocumentBaseResult) simulateResponse.getResults().get(0); } - private void putPipeline() throws IOException { + /** + * This creates a pipeline with a geoip processor, which ought to cause the geoip downloader to begin (assuming it is enabled). + * @throws IOException + */ + private void putGeoIpPipeline() throws IOException { + putGeoIpPipeline("_id"); + } + + /** + * This creates a pipeline named pipelineId with a geoip processor, which ought to cause the geoip downloader to begin (assuming it is + * enabled). + * @param pipelineId The name of the new pipeline with a geoip processor + * @throws IOException + */ + private void putGeoIpPipeline(String pipelineId) throws IOException { BytesReference bytes; try (XContentBuilder builder = JsonXContent.contentBuilder()) { builder.startObject(); @@ -484,7 +542,45 @@ private void putPipeline() throws IOException { builder.endObject(); bytes = BytesReference.bytes(builder); } - assertAcked(client().admin().cluster().preparePutPipeline("_id", bytes, XContentType.JSON).get()); + assertAcked(client().admin().cluster().preparePutPipeline(pipelineId, bytes, XContentType.JSON).get()); + } + + /** + * This creates a pipeline named pipelineId that does _not_ have a geoip processor. + * @throws IOException + */ + private void putNonGeoipPipeline(String pipelineId) throws IOException { + BytesReference bytes; + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + builder.startObject(); + { + builder.startArray("processors"); + { + builder.startObject(); + { + builder.startObject(NonGeoProcessorsPlugin.NON_GEO_PROCESSOR_TYPE); + builder.endObject(); + } + builder.endObject(); + builder.startObject(); + { + builder.startObject(NonGeoProcessorsPlugin.NON_GEO_PROCESSOR_TYPE); + builder.endObject(); + } + builder.endObject(); + builder.startObject(); + { + builder.startObject(NonGeoProcessorsPlugin.NON_GEO_PROCESSOR_TYPE); + builder.endObject(); + } + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + bytes = BytesReference.bytes(builder); + } + assertAcked(client().admin().cluster().preparePutPipeline(pipelineId, bytes, XContentType.JSON).get()); } private List getGeoIpTmpDirs() throws IOException { @@ -624,4 +720,32 @@ public int read(byte[] b, int off, int len) throws IOException { return read; } } + + /** + * This class defines a processor of type "test". 
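+ * It is used by putNonGeoipPipeline to build pipelines that contain no geoip processors, which should not trigger any geoip database downloads.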
+ */ + public static final class NonGeoProcessorsPlugin extends Plugin implements IngestPlugin { + public static final String NON_GEO_PROCESSOR_TYPE = "test"; + + @Override + public Map getProcessors(Processor.Parameters parameters) { + Map procMap = new HashMap<>(); + procMap.put(NON_GEO_PROCESSOR_TYPE, (factories, tag, description, config) -> new AbstractProcessor(tag, description) { + @Override + public void execute(IngestDocument ingestDocument, BiConsumer handler) {} + + @Override + public String getType() { + return NON_GEO_PROCESSOR_TYPE; + } + + @Override + public boolean isAsync() { + return false; + } + + }); + return procMap; + } + } } diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderStatsIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderStatsIT.java index 288547b6a72d..eea763351dd0 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderStatsIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderStatsIT.java @@ -20,6 +20,8 @@ import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.After; import java.io.IOException; @@ -29,6 +31,7 @@ import java.util.Map; import java.util.stream.Collectors; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -66,6 +69,11 @@ public void disableDownloader() { } public void testStats() throws Exception { + /* + * Testing without the geoip endpoint fixture falls back to https://storage.googleapis.com/, which can cause this test to run too + * slowly to pass. 
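+ * For that reason the assumeTrue call below skips this test when no fixture endpoint (ENDPOINT) is configured.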
+ */ + assumeTrue("only test with fixture to have stable results", ENDPOINT != null); GeoIpDownloaderStatsAction.Request req = new GeoIpDownloaderStatsAction.Request(); GeoIpDownloaderStatsAction.Response response = client().execute(GeoIpDownloaderStatsAction.INSTANCE, req).actionGet(); XContentTestUtils.JsonMapView jsonMapView = new XContentTestUtils.JsonMapView(convertToMap(response)); @@ -75,7 +83,7 @@ public void testStats() throws Exception { assertThat(jsonMapView.get("stats.databases_count"), equalTo(0)); assertThat(jsonMapView.get("stats.total_download_time"), equalTo(0)); assertEquals(0, jsonMapView.>get("nodes").size()); - + putPipeline(); ClusterUpdateSettingsResponse settingsResponse = client().admin() .cluster() .prepareUpdateSettings() @@ -103,6 +111,33 @@ public void testStats() throws Exception { }); } + private void putPipeline() throws IOException { + BytesReference bytes; + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + builder.startObject(); + { + builder.startArray("processors"); + { + builder.startObject(); + { + builder.startObject("geoip"); + { + builder.field("field", "ip"); + builder.field("target_field", "ip-city"); + builder.field("database_file", "GeoLite2-City.mmdb"); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + bytes = BytesReference.bytes(builder); + } + assertAcked(client().admin().cluster().preparePutPipeline("_id", bytes, XContentType.JSON).get()); + } + public static Map convertToMap(ToXContent part) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); part.toXContent(builder, EMPTY_PARAMS); diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskIT.java index 60be668272b2..83fde48b39f3 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskIT.java @@ -48,7 +48,7 @@ public void cleanUp() throws Exception { .setPersistentSettings( Settings.builder() .putNull(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey()) - .putNull(GeoIpDownloader.POLL_INTERVAL_SETTING.getKey()) + .putNull(GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getKey()) .putNull("ingest.geoip.database_validity") ) .get() diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java index 75b9e1eb9fce..178cfe0a20c4 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpProcessorNonIngestNodeIT.java @@ -112,9 +112,8 @@ public void testLazyLoading() throws IOException { } private void assertDatabaseLoadStatus(final String node, final boolean loaded) { - final IngestService ingestService = internalCluster().getInstance(IngestService.class, node); - final GeoIpProcessor.Factory factory = (GeoIpProcessor.Factory) ingestService.getProcessorFactories().get("geoip"); - for (final DatabaseReaderLazyLoader loader : factory.getAllDatabases()) { + final DatabaseNodeService databaseNodeService = 
internalCluster().getInstance(DatabaseNodeService.class, node); + for (final DatabaseReaderLazyLoader loader : databaseNodeService.getAllDatabases()) { if (loaded) { assertNotNull(loader.databaseReader.get()); } else { diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/ReloadingDatabasesWhilePerformingGeoLookupsIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/ReloadingDatabasesWhilePerformingGeoLookupsIT.java index 56abe71f63c0..8d8b0b4215b3 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/ReloadingDatabasesWhilePerformingGeoLookupsIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/ReloadingDatabasesWhilePerformingGeoLookupsIT.java @@ -64,10 +64,10 @@ public class ReloadingDatabasesWhilePerformingGeoLookupsIT extends ESTestCase { public void test() throws Exception { Path geoIpConfigDir = createTempDir(); Path geoIpTmpDir = createTempDir(); - DatabaseNodeService databaseNodeService = createRegistry(geoIpConfigDir, geoIpTmpDir); ClusterService clusterService = mock(ClusterService.class); when(clusterService.state()).thenReturn(ClusterState.EMPTY_STATE); - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService, clusterService); + DatabaseNodeService databaseNodeService = createRegistry(geoIpConfigDir, geoIpTmpDir, clusterService); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoLite2-City-Test.mmdb"), geoIpTmpDir.resolve("GeoLite2-City.mmdb")); Files.copy(ConfigDatabases.class.getResourceAsStream("/GeoLite2-City-Test.mmdb"), geoIpTmpDir.resolve("GeoLite2-City-Test.mmdb")); databaseNodeService.updateDatabase("GeoLite2-City.mmdb", "md5", geoIpTmpDir.resolve("GeoLite2-City.mmdb")); @@ -190,7 +190,8 @@ public void test() throws Exception { IOUtils.rm(geoIpConfigDir, geoIpTmpDir); } - private static DatabaseNodeService createRegistry(Path geoIpConfigDir, Path geoIpTmpDir) throws IOException { + private static DatabaseNodeService createRegistry(Path geoIpConfigDir, Path geoIpTmpDir, ClusterService clusterService) + throws IOException { GeoIpCache cache = new GeoIpCache(0); ConfigDatabases configDatabases = new ConfigDatabases(geoIpConfigDir, cache); copyDatabaseFiles(geoIpConfigDir, configDatabases); @@ -199,9 +200,10 @@ private static DatabaseNodeService createRegistry(Path geoIpConfigDir, Path geoI mock(Client.class), cache, configDatabases, - Runnable::run + Runnable::run, + clusterService ); - databaseNodeService.initialize("nodeId", mock(ResourceWatcherService.class), mock(IngestService.class), mock(ClusterService.class)); + databaseNodeService.initialize("nodeId", mock(ResourceWatcherService.class), mock(IngestService.class)); return databaseNodeService; } diff --git a/modules/ingest-geoip/src/main/java/module-info.java b/modules/ingest-geoip/src/main/java/module-info.java index 08f12becdf88..fa0b0266414f 100644 --- a/modules/ingest-geoip/src/main/java/module-info.java +++ b/modules/ingest-geoip/src/main/java/module-info.java @@ -12,8 +12,8 @@ requires org.elasticsearch.xcontent; requires org.apache.logging.log4j; requires org.apache.lucene.core; - requires geoip2; - requires maxmind.db; + requires com.maxmind.geoip2; + requires com.maxmind.db; exports org.elasticsearch.ingest.geoip.stats to org.elasticsearch.server; } diff --git 
a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java index 492b329e1976..e96197ab59ff 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.hash.MessageDigests; +import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.core.IOUtils; @@ -63,6 +64,7 @@ import java.util.zip.GZIPInputStream; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.persistent.PersistentTasksCustomMetadata.getTaskWithId; /** * A component that is responsible for making the databases maintained by {@link GeoIpDownloader} @@ -77,13 +79,13 @@ * 2) For each database check whether the databases have changed * by comparing the local and remote md5 hash or are locally missing. * 3) For each database identified in step 2 start downloading the database - * chunks. Each chunks is appended to a tmp file (inside geoip tmp dir) and + * chunks. Each chunk is appended to a tmp file (inside geoip tmp dir) and * after all chunks have been downloaded, the database is uncompressed and * renamed to the final filename.After this the database is loaded and * if there is an old instance of this database then that is closed. * 4) Cleanup locally loaded databases that are no longer mentioned in {@link GeoIpTaskState}. 
*/ -public final class DatabaseNodeService implements Closeable { +public final class DatabaseNodeService implements GeoIpDatabaseProvider, Closeable { private static final Logger LOGGER = LogManager.getLogger(DatabaseNodeService.class); @@ -93,34 +95,45 @@ public final class DatabaseNodeService implements Closeable { private Path geoipTmpDirectory; private final ConfigDatabases configDatabases; private final Consumer genericExecutor; + private final ClusterService clusterService; private IngestService ingestService; private final ConcurrentMap databases = new ConcurrentHashMap<>(); - DatabaseNodeService(Environment environment, Client client, GeoIpCache cache, Consumer genericExecutor) { + DatabaseNodeService( + Environment environment, + Client client, + GeoIpCache cache, + Consumer genericExecutor, + ClusterService clusterService + ) { this( environment.tmpFile(), new OriginSettingClient(client, IngestService.INGEST_ORIGIN), cache, new ConfigDatabases(environment, cache), - genericExecutor + genericExecutor, + clusterService ); } - DatabaseNodeService(Path tmpDir, Client client, GeoIpCache cache, ConfigDatabases configDatabases, Consumer genericExecutor) { + DatabaseNodeService( + Path tmpDir, + Client client, + GeoIpCache cache, + ConfigDatabases configDatabases, + Consumer genericExecutor, + ClusterService clusterService + ) { this.client = client; this.cache = cache; this.geoipTmpBaseDirectory = tmpDir.resolve("geoip-databases"); this.configDatabases = configDatabases; this.genericExecutor = genericExecutor; + this.clusterService = clusterService; } - public void initialize( - String nodeId, - ResourceWatcherService resourceWatcher, - IngestService ingestServiceArg, - ClusterService clusterService - ) throws IOException { + public void initialize(String nodeId, ResourceWatcherService resourceWatcher, IngestService ingestServiceArg) throws IOException { configDatabases.initialize(resourceWatcher); geoipTmpDirectory = geoipTmpBaseDirectory.resolve(nodeId); Files.walkFileTree(geoipTmpDirectory, new FileVisitor<>() { @@ -161,7 +174,35 @@ public FileVisitResult postVisitDirectory(Path dir, IOException exc) { clusterService.addListener(event -> checkDatabases(event.state())); } - public DatabaseReaderLazyLoader getDatabase(String name) { + @Override + public Boolean isValid(String databaseFile) { + ClusterState currentState = clusterService.state(); + assert currentState != null; + + PersistentTasksCustomMetadata.PersistentTask task = getTaskWithId(currentState, GeoIpDownloader.GEOIP_DOWNLOADER); + if (task == null || task.getState() == null) { + return true; + } + GeoIpTaskState state = (GeoIpTaskState) task.getState(); + GeoIpTaskState.Metadata metadata = state.getDatabases().get(databaseFile); + // we never remove metadata from cluster state, if metadata is null we deal with built-in database, which is always valid + if (metadata == null) { + return true; + } + + boolean valid = metadata.isValid(currentState.metadata().settings()); + if (valid && metadata.isCloseToExpiration()) { + HeaderWarning.addWarning( + "database [{}] was not updated for over 25 days, geoip processor" + " will stop working if there is no update for 30 days", + databaseFile + ); + } + + return valid; + } + + // for testing only: + DatabaseReaderLazyLoader getDatabaseReaderLazyLoader(String name) { // There is a need for reference counting in order to avoid using an instance // that gets closed while using it. 
(this can happen during a database update) while (true) { @@ -174,6 +215,11 @@ public DatabaseReaderLazyLoader getDatabase(String name) { } } + @Override + public GeoIpDatabase getDatabase(String name) { + return getDatabaseReaderLazyLoader(name); + } + List getAllDatabases() { List all = new ArrayList<>(configDatabases.getConfigDatabases().values()); this.databases.forEach((key, value) -> all.add(value)); diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java index 13181e1f96bb..3519113033b4 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java @@ -19,7 +19,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; -import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.CheckedBiFunction; import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.core.Booleans; @@ -34,8 +33,6 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; -import java.security.AccessController; -import java.security.PrivilegedAction; import java.util.Objects; import java.util.Optional; import java.util.concurrent.atomic.AtomicInteger; @@ -44,7 +41,7 @@ * Facilitates lazy loading of the database reader, so that when the geoip plugin is installed, but not used, * no memory is being wasted on the database reader. */ -class DatabaseReaderLazyLoader implements Closeable { +class DatabaseReaderLazyLoader implements GeoIpDatabase, Closeable { private static final boolean LOAD_DATABASE_ON_HEAP = Booleans.parseBoolean(System.getProperty("es.geoip.load_db_on_heap", "false")); @@ -84,7 +81,8 @@ class DatabaseReaderLazyLoader implements Closeable { * @return the database type * @throws IOException if an I/O exception occurs reading the database type */ - final String getDatabaseType() throws IOException { + @Override + public final String getDatabaseType() throws IOException { if (databaseType.get() == null) { synchronized (databaseType) { if (databaseType.get() == null) { @@ -154,17 +152,20 @@ InputStream databaseInputStream() throws IOException { } @Nullable - CityResponse getCity(InetAddress ipAddress) { + @Override + public CityResponse getCity(InetAddress ipAddress) { return getResponse(ipAddress, DatabaseReader::tryCity); } @Nullable - CountryResponse getCountry(InetAddress ipAddress) { + @Override + public CountryResponse getCountry(InetAddress ipAddress) { return getResponse(ipAddress, DatabaseReader::tryCountry); } @Nullable - AsnResponse getAsn(InetAddress ipAddress) { + @Override + public AsnResponse getAsn(InetAddress ipAddress) { return getResponse(ipAddress, DatabaseReader::tryAsn); } @@ -172,7 +173,8 @@ boolean preLookup() { return currentUsages.updateAndGet(current -> current < 0 ? current : current + 1) > 0; } - void postLookup() throws IOException { + @Override + public void release() throws IOException { if (currentUsages.updateAndGet(current -> current > 0 ? 
current - 1 : current + 1) == -1) { doClose(); } @@ -187,14 +189,13 @@ private T getResponse( InetAddress ipAddress, CheckedBiFunction, Exception> responseProvider ) { - SpecialPermission.check(); - return AccessController.doPrivileged((PrivilegedAction) () -> cache.putIfAbsent(ipAddress, databasePath.toString(), ip -> { + return cache.putIfAbsent(ipAddress, databasePath.toString(), ip -> { try { return responseProvider.apply(get(), ipAddress).orElse(null); } catch (Exception e) { throw new RuntimeException(e); } - })); + }); } DatabaseReader get() throws IOException { @@ -225,7 +226,8 @@ public void close() throws IOException { } } - private void doClose() throws IOException { + // Visible for Testing + protected void doClose() throws IOException { IOUtils.close(databaseReader.get()); int numEntriesEvicted = cache.purgeCacheEntriesForDatabase(databasePath); LOGGER.info("evicted [{}] entries from cache after reloading database [{}]", numEntriesEvicted, databasePath); diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpCache.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpCache.java index 01938c617db6..30c0fcb74833 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpCache.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpCache.java @@ -15,7 +15,6 @@ import java.net.InetAddress; import java.nio.file.Path; -import java.util.Objects; import java.util.function.Function; /** @@ -82,29 +81,5 @@ public int count() { * path is needed to be included in the cache key. For example, if we only used the IP address as the key the City and ASN the same * IP may be in both with different values and we need to cache both. */ - private static class CacheKey { - - private final InetAddress ip; - private final String databasePath; - - private CacheKey(InetAddress ip, String databasePath) { - this.ip = ip; - this.databasePath = databasePath; - } - - // generated - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - CacheKey cacheKey = (CacheKey) o; - return Objects.equals(ip, cacheKey.ip) && Objects.equals(databasePath, cacheKey.databasePath); - } - - // generated - @Override - public int hashCode() { - return Objects.hash(ip, databasePath); - } - } + private record CacheKey(InetAddress ip, String databasePath) {} } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDatabase.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDatabase.java new file mode 100644 index 000000000000..0bcaab6e6915 --- /dev/null +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDatabase.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest.geoip; + +import com.maxmind.geoip2.model.AsnResponse; +import com.maxmind.geoip2.model.CityResponse; +import com.maxmind.geoip2.model.CountryResponse; + +import org.elasticsearch.core.Nullable; + +import java.io.IOException; +import java.net.InetAddress; + +/** + * Provides a uniform interface for interacting with various GeoIP databases. 
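+ * Instances are obtained from a {@link GeoIpDatabaseProvider} and released via {@link #release()} once a document has been processed.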
+ */ +public interface GeoIpDatabase { + + /** + * @return the database type as it is detailed in the database file metadata + * @throws IOException if the database file could not be read or the data could not be accessed + */ + String getDatabaseType() throws IOException; + + /** + * @param ipAddress the IP address to look up + * @return a response containing the city data for the given address if it exists, or null if it could not be found + * @throws UnsupportedOperationException may be thrown if the implementation does not support retrieving city data + */ + @Nullable + CityResponse getCity(InetAddress ipAddress); + + /** + * @param ipAddress the IP address to look up + * @return a response containing the country data for the given address if it exists, or null if it could not be found + * @throws UnsupportedOperationException may be thrown if the implementation does not support retrieving country data + */ + @Nullable + CountryResponse getCountry(InetAddress ipAddress); + + /** + * @param ipAddress the IP address to look up + * @return a response containing the Autonomous System Number for the given address if it exists, or null if it could not + * be found + * @throws UnsupportedOperationException may be thrown if the implementation does not support retrieving ASN data + */ + @Nullable + AsnResponse getAsn(InetAddress ipAddress); + + /** + * Releases the current database object. Called after processing a single document. Databases should be closed or returned to a + * resource pool. No further interactions should be expected. + * @throws IOException if the implementation encounters any problem while cleaning up + */ + void release() throws IOException; +} diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDatabaseProvider.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDatabaseProvider.java new file mode 100644 index 000000000000..6baa95904577 --- /dev/null +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDatabaseProvider.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.ingest.geoip; + +/** + * Provides construction and initialization logic for {@link GeoIpDatabase} instances. + */ +public interface GeoIpDatabaseProvider { + + /** + * Determines if the given database name corresponds to an expired database. Expired databases will not be loaded. + *
<p>
+ * Verifying database expiration is left to each provider implementation to determine. A return value of false does not + * preclude the possibility of a provider returning true in the future. + * + * @param name the name of the database to provide. + * @return false IFF the requested database file is expired, + * true for all other cases (including unknown file name, file missing, wrong database type, etc). + */ + Boolean isValid(String name); + + /** + * @param name the name of the database to provide. Default database names that should always be supported are listed in + * {@link IngestGeoIpPlugin#DEFAULT_DATABASE_FILENAMES}. + * @return a ready-to-use database instance, or null if no database could be loaded. + */ + GeoIpDatabase getDatabase(String name); +} diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java index 6776ab9d629a..0732674632b3 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; @@ -48,6 +47,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Supplier; /** * Main component responsible for downloading new GeoIP databases. @@ -59,14 +59,6 @@ public class GeoIpDownloader extends AllocatedPersistentTask { private static final Logger logger = LogManager.getLogger(GeoIpDownloader.class); - public static final Setting POLL_INTERVAL_SETTING = Setting.timeSetting( - "ingest.geoip.downloader.poll.interval", - TimeValue.timeValueDays(3), - TimeValue.timeValueDays(1), - Property.Dynamic, - Property.NodeScope - ); - // for overriding in tests private static final String DEFAULT_ENDPOINT = System.getProperty( "ingest.geoip.downloader.endpoint.default", @@ -91,9 +83,16 @@ public class GeoIpDownloader extends AllocatedPersistentTask { // visible for testing protected volatile GeoIpTaskState state; - private volatile TimeValue pollInterval; private volatile Scheduler.ScheduledCancellable scheduled; private volatile GeoIpDownloaderStats stats = GeoIpDownloaderStats.EMPTY; + private final Supplier pollIntervalSupplier; + private final Supplier eagerDownloadSupplier; + /* + * This variable tells us whether we have at least one pipeline with a geoip processor. If there are no geoip processors then we do + * not download geoip databases (unless configured to eagerly download). Access is not protected because it is set in the constructor + * and then only ever updated on the cluster state update thread (it is also read on the generic thread). Non-private for unit testing. 
+ */ + private final Supplier atLeastOneGeoipProcessorSupplier; GeoIpDownloader( Client client, @@ -106,7 +105,10 @@ public class GeoIpDownloader extends AllocatedPersistentTask { String action, String description, TaskId parentTask, - Map headers + Map headers, + Supplier pollIntervalSupplier, + Supplier eagerDownloadSupplier, + Supplier atLeastOneGeoipProcessorSupplier ) { super(id, type, action, description, parentTask, headers); this.httpClient = httpClient; @@ -114,15 +116,9 @@ public class GeoIpDownloader extends AllocatedPersistentTask { this.clusterService = clusterService; this.threadPool = threadPool; endpoint = ENDPOINT_SETTING.get(settings); - pollInterval = POLL_INTERVAL_SETTING.get(settings); - clusterService.getClusterSettings().addSettingsUpdateConsumer(POLL_INTERVAL_SETTING, this::setPollInterval); - } - - public void setPollInterval(TimeValue pollInterval) { - this.pollInterval = pollInterval; - if (scheduled != null && scheduled.cancel()) { - scheduleNextRun(TimeValue.ZERO); - } + this.pollIntervalSupplier = pollIntervalSupplier; + this.eagerDownloadSupplier = eagerDownloadSupplier; + this.atLeastOneGeoipProcessorSupplier = atLeastOneGeoipProcessorSupplier; } // visible for testing @@ -130,6 +126,7 @@ void updateDatabases() throws IOException { var clusterState = clusterService.state(); var geoipIndex = clusterState.getMetadata().getIndicesLookup().get(GeoIpDownloader.DATABASES_INDEX); if (geoipIndex != null) { + logger.trace("The {} index is not null", GeoIpDownloader.DATABASES_INDEX); if (clusterState.getRoutingTable().index(geoipIndex.getWriteIndex()).allPrimaryShardsActive() == false) { throw new ElasticsearchException("not all primary shards of [" + DATABASES_INDEX + "] index are active"); } @@ -138,13 +135,18 @@ void updateDatabases() throws IOException { throw blockException; } } - - logger.debug("updating geoip databases"); - List> response = fetchDatabasesOverview(); - for (Map res : response) { - if (res.get("name").toString().endsWith(".tgz")) { - processDatabase(res); + if (eagerDownloadSupplier.get() || atLeastOneGeoipProcessorSupplier.get()) { + logger.trace("Updating geoip databases"); + List> response = fetchDatabasesOverview(); + for (Map res : response) { + if (res.get("name").toString().endsWith(".tgz")) { + processDatabase(res); + } } + } else { + logger.trace( + "Not updating geoip databases because no geoip processors exist in the cluster and eager downloading is not configured" + ); } } @@ -186,7 +188,7 @@ void processDatabase(Map databaseInfo) { } } catch (Exception e) { stats = stats.failedDownload(); - logger.error((Supplier) () -> "error downloading geoip database [" + name + "]", e); + logger.error((org.apache.logging.log4j.util.Supplier) () -> "error downloading geoip database [" + name + "]", e); } } @@ -266,6 +268,9 @@ void setState(GeoIpTaskState state) { this.state = state; } + /** + * Downloads the geoip databases now, and schedules them to be downloaded again after pollInterval. + */ void runDownloader() { if (isCancelled() || isCompleted()) { return; @@ -281,7 +286,22 @@ void runDownloader() { } catch (Exception e) { logger.error("exception during geoip databases cleanup", e); } - scheduleNextRun(pollInterval); + scheduleNextRun(pollIntervalSupplier.get()); + } + + /** + * This method requests that the downloader be rescheduled to run immediately (presumably because a dynamic property supplied by + * pollIntervalSupplier or eagerDownloadSupplier has changed, or a pipeline with a geoip processor has been added). 
This method does + * nothing if this task is cancelled, completed, or has not yet been scheduled to run for the first time. It cancels any existing + * scheduled run. + */ + public void requestReschedule() { + if (isCancelled() || isCompleted()) { + return; + } + if (scheduled != null && scheduled.cancel()) { + scheduleNextRun(TimeValue.ZERO); + } } private void cleanDatabases() { @@ -321,4 +341,5 @@ private void scheduleNextRun(TimeValue time) { scheduled = threadPool.schedule(this::runDownloader, time, ThreadPool.Names.GENERIC); } } + } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java index c56fd9c2d0c5..7457738b7530 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java @@ -17,15 +17,20 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.Index; +import org.elasticsearch.ingest.IngestMetadata; import org.elasticsearch.ingest.IngestService; +import org.elasticsearch.ingest.Pipeline; +import org.elasticsearch.ingest.PipelineConfiguration; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; @@ -35,7 +40,10 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteTransportException; +import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.ingest.geoip.GeoIpDownloader.DATABASES_INDEX; @@ -56,6 +64,20 @@ public final class GeoIpDownloaderTaskExecutor extends PersistentTasksExecutor POLL_INTERVAL_SETTING = Setting.timeSetting( + "ingest.geoip.downloader.poll.interval", + TimeValue.timeValueDays(3), + TimeValue.timeValueDays(1), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting EAGER_DOWNLOAD_SETTING = Setting.boolSetting( + "ingest.geoip.downloader.eager.download", + false, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); private static final Logger logger = LogManager.getLogger(GeoIpDownloader.class); @@ -66,6 +88,10 @@ public final class GeoIpDownloaderTaskExecutor extends PersistentTasksExecutor currentTask = new AtomicReference<>(); + private volatile TimeValue pollInterval; + private volatile boolean eagerDownload; + private volatile boolean atLeastOneGeoipProcessor; + private final AtomicBoolean taskIsBootstrapped = new AtomicBoolean(false); GeoIpDownloaderTaskExecutor(Client client, HttpClient httpClient, ClusterService clusterService, ThreadPool threadPool) { super(GEOIP_DOWNLOADER, 
ThreadPool.Names.GENERIC); @@ -75,9 +101,18 @@ public final class GeoIpDownloaderTaskExecutor extends PersistentTasksExecutor pollInterval, + () -> eagerDownload, + () -> atLeastOneGeoipProcessor ); } @@ -140,12 +198,65 @@ public void clusterChanged(ClusterChangedEvent event) { return; } - clusterService.removeListener(this); - if (ENABLED_SETTING.get(event.state().getMetadata().settings(), settings)) { - startTask(() -> clusterService.addListener(this)); - } else { - stopTask(() -> clusterService.addListener(this)); + if (taskIsBootstrapped.getAndSet(true) == false) { + this.atLeastOneGeoipProcessor = hasAtLeastOneGeoipProcessor(event.state()); + if (ENABLED_SETTING.get(event.state().getMetadata().settings(), settings)) { + startTask(() -> taskIsBootstrapped.set(false)); + } else { + stopTask(() -> taskIsBootstrapped.set(false)); + } } + + if (event.metadataChanged() && event.changedCustomMetadataSet().contains(IngestMetadata.TYPE)) { + boolean newAtLeastOneGeoipProcessor = hasAtLeastOneGeoipProcessor(event.state()); + if (newAtLeastOneGeoipProcessor && atLeastOneGeoipProcessor == false) { + atLeastOneGeoipProcessor = true; + logger.trace("Scheduling runDownloader because a geoip processor has been added"); + GeoIpDownloader currentDownloader = getCurrentTask(); + if (currentDownloader != null) { + currentDownloader.requestReschedule(); + } + } else { + atLeastOneGeoipProcessor = newAtLeastOneGeoipProcessor; + } + } + } + + @SuppressWarnings("unchecked") + static boolean hasAtLeastOneGeoipProcessor(ClusterState clusterState) { + List pipelineDefinitions = IngestService.getPipelines(clusterState); + return pipelineDefinitions.stream().anyMatch(pipelineDefinition -> { + Map pipelineMap = pipelineDefinition.getConfigAsMap(); + return hasAtLeastOneGeoipProcessor((List>) pipelineMap.get(Pipeline.PROCESSORS_KEY)); + }); + } + + private static boolean hasAtLeastOneGeoipProcessor(List> processors) { + return processors != null && processors.stream().anyMatch(GeoIpDownloaderTaskExecutor::hasAtLeastOneGeoipProcessor); + } + + private static boolean hasAtLeastOneGeoipProcessor(Map processor) { + return processor != null + && (processor.containsKey(GeoIpProcessor.TYPE) + || isProcessorWithOnFailureGeoIpProcessor(processor) + || isForeachProcessorWithGeoipProcessor(processor)); + } + + @SuppressWarnings("unchecked") + private static boolean isProcessorWithOnFailureGeoIpProcessor(Map processor) { + return processor != null + && processor.values() + .stream() + .anyMatch( + value -> value instanceof Map + && hasAtLeastOneGeoipProcessor(((Map>>) value).get("on_failure")) + ); + } + + @SuppressWarnings("unchecked") + private static boolean isForeachProcessorWithGeoipProcessor(Map processor) { + return processor.containsKey("foreach") + && hasAtLeastOneGeoipProcessor(((Map>) processor.get("foreach")).get("processor")); } private void startTask(Runnable onFailure) { diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java index 1e9185f17594..eadad3673eba 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java @@ -20,18 +20,14 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.service.ClusterService; import 
org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Processor; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; import java.io.IOException; import java.net.InetAddress; @@ -50,7 +46,6 @@ import static org.elasticsearch.ingest.ConfigurationUtils.readBooleanProperty; import static org.elasticsearch.ingest.ConfigurationUtils.readOptionalList; import static org.elasticsearch.ingest.ConfigurationUtils.readStringProperty; -import static org.elasticsearch.persistent.PersistentTasksCustomMetadata.getTaskWithId; public final class GeoIpProcessor extends AbstractProcessor { @@ -66,7 +61,7 @@ public final class GeoIpProcessor extends AbstractProcessor { private final String field; private final Supplier isValid; private final String targetField; - private final CheckedSupplier supplier; + private final CheckedSupplier supplier; private final Set properties; private final boolean ignoreMissing; private final boolean firstOnly; @@ -74,22 +69,22 @@ public final class GeoIpProcessor extends AbstractProcessor { /** * Construct a geo-IP processor. - * @param tag the processor tag + * @param tag the processor tag * @param description the processor description * @param field the source field to geo-IP map * @param supplier a supplier of a geo-IP database reader; ideally this is lazily-loaded once on first use - * @param isValid + * @param isValid a supplier that determines if the available database files are up-to-date and license compliant * @param targetField the target field * @param properties the properties; ideally this is lazily-loaded once on first use * @param ignoreMissing true if documents with a missing value for the field should be ignored * @param firstOnly true if only first result should be returned in case of array - * @param databaseFile + * @param databaseFile the name of the database file being queried; used only for tagging documents if the database is unavailable */ GeoIpProcessor( final String tag, final String description, final String field, - final CheckedSupplier supplier, + final CheckedSupplier supplier, final Supplier isValid, final String targetField, final Set properties, @@ -125,70 +120,70 @@ public IngestDocument execute(IngestDocument ingestDocument) throws IOException throw new IllegalArgumentException("field [" + field + "] is null, cannot extract geoip information."); } - DatabaseReaderLazyLoader lazyLoader = this.supplier.get(); - if (lazyLoader == null) { + GeoIpDatabase geoIpDatabase = this.supplier.get(); + if (geoIpDatabase == null) { if (ignoreMissing == false) { tag(ingestDocument, databaseFile); } return ingestDocument; } - if (ip instanceof String ipString) { - Map geoData = getGeoData(lazyLoader, ipString); - if (geoData.isEmpty() == false) { - ingestDocument.setFieldValue(targetField, geoData); - } - } else if (ip instanceof List ipList) { - boolean match = false; - List> geoDataList = new ArrayList<>(ipList.size()); - for (Object ipAddr : ipList) { - if (ipAddr instanceof String == false) { - throw new IllegalArgumentException("array in field [" + field + "] should only contain strings"); + try { + if (ip instanceof String 
ipString) { + Map geoData = getGeoData(geoIpDatabase, ipString); + if (geoData.isEmpty() == false) { + ingestDocument.setFieldValue(targetField, geoData); } - Map geoData = getGeoData(lazyLoader, (String) ipAddr); - if (geoData.isEmpty()) { - geoDataList.add(null); - continue; + } else if (ip instanceof List ipList) { + boolean match = false; + List> geoDataList = new ArrayList<>(ipList.size()); + for (Object ipAddr : ipList) { + if (ipAddr instanceof String == false) { + throw new IllegalArgumentException("array in field [" + field + "] should only contain strings"); + } + Map geoData = getGeoData(geoIpDatabase, (String) ipAddr); + if (geoData.isEmpty()) { + geoDataList.add(null); + continue; + } + if (firstOnly) { + ingestDocument.setFieldValue(targetField, geoData); + return ingestDocument; + } + match = true; + geoDataList.add(geoData); } - if (firstOnly) { - ingestDocument.setFieldValue(targetField, geoData); - return ingestDocument; + if (match) { + ingestDocument.setFieldValue(targetField, geoDataList); } - match = true; - geoDataList.add(geoData); - } - if (match) { - ingestDocument.setFieldValue(targetField, geoDataList); + } else { + throw new IllegalArgumentException("field [" + field + "] should contain only string or array of strings"); } - } else { - throw new IllegalArgumentException("field [" + field + "] should contain only string or array of strings"); + } finally { + geoIpDatabase.release(); } return ingestDocument; } - private Map getGeoData(DatabaseReaderLazyLoader lazyLoader, String ip) throws IOException { - try { - final String databaseType = lazyLoader.getDatabaseType(); - final InetAddress ipAddress = InetAddresses.forString(ip); - Map geoData; - if (databaseType.endsWith(CITY_DB_SUFFIX)) { - geoData = retrieveCityGeoData(lazyLoader, ipAddress); - } else if (databaseType.endsWith(COUNTRY_DB_SUFFIX)) { - geoData = retrieveCountryGeoData(lazyLoader, ipAddress); + private Map getGeoData(GeoIpDatabase geoIpDatabase, String ip) throws IOException { + final String databaseType = geoIpDatabase.getDatabaseType(); + final InetAddress ipAddress = InetAddresses.forString(ip); + Map geoData; + if (databaseType.endsWith(CITY_DB_SUFFIX)) { + geoData = retrieveCityGeoData(geoIpDatabase, ipAddress); + } else if (databaseType.endsWith(COUNTRY_DB_SUFFIX)) { + geoData = retrieveCountryGeoData(geoIpDatabase, ipAddress); - } else if (databaseType.endsWith(ASN_DB_SUFFIX)) { - geoData = retrieveAsnGeoData(lazyLoader, ipAddress); + } else if (databaseType.endsWith(ASN_DB_SUFFIX)) { + geoData = retrieveAsnGeoData(geoIpDatabase, ipAddress); - } else { - throw new ElasticsearchParseException( - "Unsupported database type [" + lazyLoader.getDatabaseType() + "]", - new IllegalStateException() - ); - } - return geoData; - } finally { - lazyLoader.postLookup(); + } else { + throw new ElasticsearchParseException( + "Unsupported database type [" + geoIpDatabase.getDatabaseType() + "]", + new IllegalStateException() + ); } + return geoData; } @Override @@ -212,8 +207,8 @@ Set getProperties() { return properties; } - private Map retrieveCityGeoData(DatabaseReaderLazyLoader lazyLoader, InetAddress ipAddress) { - CityResponse response = lazyLoader.getCity(ipAddress); + private Map retrieveCityGeoData(GeoIpDatabase geoIpDatabase, InetAddress ipAddress) { + CityResponse response = geoIpDatabase.getCity(ipAddress); if (response == null) { return Map.of(); } @@ -288,8 +283,8 @@ private Map retrieveCityGeoData(DatabaseReaderLazyLoader lazyLoa return geoData; } - private Map 
retrieveCountryGeoData(DatabaseReaderLazyLoader lazyLoader, InetAddress ipAddress) { - CountryResponse response = lazyLoader.getCountry(ipAddress); + private Map retrieveCountryGeoData(GeoIpDatabase geoIpDatabase, InetAddress ipAddress) { + CountryResponse response = geoIpDatabase.getCountry(ipAddress); if (response == null) { return Map.of(); } @@ -323,8 +318,8 @@ private Map retrieveCountryGeoData(DatabaseReaderLazyLoader lazy return geoData; } - private Map retrieveAsnGeoData(DatabaseReaderLazyLoader lazyLoader, InetAddress ipAddress) { - AsnResponse response = lazyLoader.getAsn(ipAddress); + private Map retrieveAsnGeoData(GeoIpDatabase geoIpDatabase, InetAddress ipAddress) { + AsnResponse response = geoIpDatabase.getAsn(ipAddress); if (response == null) { return Map.of(); } @@ -358,6 +353,40 @@ private Map retrieveAsnGeoData(DatabaseReaderLazyLoader lazyLoad return geoData; } + /** + * Retrieves and verifies a {@link GeoIpDatabase} instance for each execution of the {@link GeoIpProcessor}. Guards against missing + * custom databases, and ensures that database instances are of the proper type before use. + */ + public static final class DatabaseVerifyingSupplier implements CheckedSupplier { + private final GeoIpDatabaseProvider geoIpDatabaseProvider; + private final String databaseFile; + private final String databaseType; + + public DatabaseVerifyingSupplier(GeoIpDatabaseProvider geoIpDatabaseProvider, String databaseFile, String databaseType) { + this.geoIpDatabaseProvider = geoIpDatabaseProvider; + this.databaseFile = databaseFile; + this.databaseType = databaseType; + } + + @Override + public GeoIpDatabase get() throws IOException { + GeoIpDatabase loader = geoIpDatabaseProvider.getDatabase(databaseFile); + if (Factory.useDatabaseUnavailableProcessor(loader, databaseFile)) { + return null; + } else if (loader == null) { + throw new ResourceNotFoundException("database file [" + databaseFile + "] doesn't exist"); + } + // Only check whether the suffix has changed and not the entire database type. + // To sanity check whether a city db isn't overwriting with a country or asn db. + // For example overwriting a geoip lite city db with geoip city db is a valid change, but the db type is slightly different, + // by checking just the suffix this assertion doesn't fail. 
+ String expectedSuffix = databaseType.substring(databaseType.lastIndexOf('-')); + assert loader.getDatabaseType().endsWith(expectedSuffix) + : "database type [" + loader.getDatabaseType() + "] doesn't match with expected suffix [" + expectedSuffix + "]"; + return loader; + } + } + public static final class Factory implements Processor.Factory { static final Set DEFAULT_CITY_PROPERTIES = Collections.unmodifiableSet( EnumSet.of( @@ -377,16 +406,10 @@ public static final class Factory implements Processor.Factory { EnumSet.of(Property.IP, Property.ASN, Property.ORGANIZATION_NAME, Property.NETWORK) ); - private final DatabaseNodeService databaseNodeService; - private final ClusterService clusterService; - - List getAllDatabases() { - return databaseNodeService.getAllDatabases(); - } + private final GeoIpDatabaseProvider geoIpDatabaseProvider; - public Factory(DatabaseNodeService databaseNodeService, ClusterService clusterService) { - this.databaseNodeService = databaseNodeService; - this.clusterService = clusterService; + public Factory(GeoIpDatabaseProvider geoIpDatabaseProvider) { + this.geoIpDatabaseProvider = geoIpDatabaseProvider; } @Override @@ -409,17 +432,17 @@ public Processor create( DEPRECATION_LOGGER.warn(DeprecationCategory.OTHER, "default_databases_message", DEFAULT_DATABASES_DEPRECATION_MESSAGE); } - DatabaseReaderLazyLoader lazyLoader = databaseNodeService.getDatabase(databaseFile); - if (useDatabaseUnavailableProcessor(lazyLoader, databaseFile)) { + GeoIpDatabase geoIpDatabase = geoIpDatabaseProvider.getDatabase(databaseFile); + if (useDatabaseUnavailableProcessor(geoIpDatabase, databaseFile)) { return new DatabaseUnavailableProcessor(processorTag, description, databaseFile); - } else if (lazyLoader == null) { + } else if (geoIpDatabase == null) { throw newConfigurationException(TYPE, processorTag, "database_file", "database file [" + databaseFile + "] doesn't exist"); } final String databaseType; try { - databaseType = lazyLoader.getDatabaseType(); + databaseType = geoIpDatabase.getDatabaseType(); } finally { - lazyLoader.postLookup(); + geoIpDatabase.release(); } final Set properties; @@ -449,54 +472,12 @@ public Processor create( ); } } - CheckedSupplier supplier = () -> { - DatabaseReaderLazyLoader loader = databaseNodeService.getDatabase(databaseFile); - if (useDatabaseUnavailableProcessor(loader, databaseFile)) { - return null; - } else if (loader == null) { - throw new ResourceNotFoundException("database file [" + databaseFile + "] doesn't exist"); - } - // Only check whether the suffix has changed and not the entire database type. - // To sanity check whether a city db isn't overwriting with a country or asn db. - // For example overwriting a geoip lite city db with geoip city db is a valid change, but the db type is slightly different, - // by checking just the suffix this assertion doesn't fail. 
- String expectedSuffix = databaseType.substring(databaseType.lastIndexOf('-')); - assert loader.getDatabaseType().endsWith(expectedSuffix) - : "database type [" + loader.getDatabaseType() + "] doesn't match with expected suffix [" + expectedSuffix + "]"; - return loader; - }; - Supplier isValid = () -> { - ClusterState currentState = clusterService.state(); - assert currentState != null; - - PersistentTask task = getTaskWithId(currentState, GeoIpDownloader.GEOIP_DOWNLOADER); - if (task == null || task.getState() == null) { - return true; - } - GeoIpTaskState state = (GeoIpTaskState) task.getState(); - GeoIpTaskState.Metadata metadata = state.getDatabases().get(databaseFile); - // we never remove metadata from cluster state, if metadata is null we deal with built-in database, which is always valid - if (metadata == null) { - return true; - } - - boolean valid = metadata.isValid(currentState.metadata().settings()); - if (valid && metadata.isCloseToExpiration()) { - HeaderWarning.addWarning( - "database [{}] was not updated for over 25 days, geoip processor" - + " will stop working if there is no update for 30 days", - databaseFile - ); - } - - return valid; - }; return new GeoIpProcessor( processorTag, description, ipField, - supplier, - isValid, + new DatabaseVerifyingSupplier(geoIpDatabaseProvider, databaseFile, databaseType), + () -> geoIpDatabaseProvider.isValid(databaseFile), targetField, properties, ignoreMissing, @@ -505,12 +486,12 @@ public Processor create( ); } - private static boolean useDatabaseUnavailableProcessor(DatabaseReaderLazyLoader loader, String databaseName) { - // If there is no loader for a database we should fail with a config error, but - // if there is no loader for a builtin database that we manage via GeoipDownloader then don't fail. + private static boolean useDatabaseUnavailableProcessor(GeoIpDatabase database, String databaseName) { + // If there is no instance for a database we should fail with a config error, but + // if there is no instance for a builtin database that we manage via GeoipDownloader then don't fail. // In the latter case the database should become available at a later moment, so a processor impl // is returned that tags documents instead. 
- return loader == null && IngestGeoIpPlugin.DEFAULT_DATABASE_FILENAMES.contains(databaseName); + return database == null && IngestGeoIpPlugin.DEFAULT_DATABASE_FILENAMES.contains(databaseName); } } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskParams.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskParams.java index 6997964ace89..a8fe538f954d 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskParams.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskParams.java @@ -8,7 +8,7 @@ package org.elasticsearch.ingest.geoip; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.persistent.PersistentTaskParams; @@ -41,8 +41,8 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_7_13_0; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.V_7_13_0; } @Override diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java index 40f2b49254a4..c924c19210f4 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java @@ -8,7 +8,7 @@ package org.elasticsearch.ingest.geoip; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.VersionedNamedWriteable; @@ -124,8 +124,8 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_7_13_0; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.V_7_13_0; } @Override diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java index 6be4b1bf6e67..7873519478a2 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java @@ -16,7 +16,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.ClusterSettings; @@ -85,9 +85,10 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, SystemInd public List> getSettings() { return Arrays.asList( CACHE_SIZE, + GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING, + GeoIpDownloaderTaskExecutor.ENABLED_SETTING, GeoIpDownloader.ENDPOINT_SETTING, - GeoIpDownloader.POLL_INTERVAL_SETTING, - GeoIpDownloaderTaskExecutor.ENABLED_SETTING + GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING ); } @@ -97,9 +98,15 @@ 
public Map getProcessors(Processor.Parameters paramet long cacheSize = CACHE_SIZE.get(parameters.env.settings()); GeoIpCache geoIpCache = new GeoIpCache(cacheSize); - DatabaseNodeService registry = new DatabaseNodeService(parameters.env, parameters.client, geoIpCache, parameters.genericExecutor); + DatabaseNodeService registry = new DatabaseNodeService( + parameters.env, + parameters.client, + geoIpCache, + parameters.genericExecutor, + parameters.ingestService.getClusterService() + ); databaseRegistry.set(registry); - return Map.of(GeoIpProcessor.TYPE, new GeoIpProcessor.Factory(registry, parameters.ingestService.getClusterService())); + return Map.of(GeoIpProcessor.TYPE, new GeoIpProcessor.Factory(registry)); } @Override @@ -116,16 +123,17 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationDeciders allocationDeciders + AllocationService allocationService ) { try { String nodeId = nodeEnvironment.nodeId(); - databaseRegistry.get().initialize(nodeId, resourceWatcherService, ingestService.get(), clusterService); + databaseRegistry.get().initialize(nodeId, resourceWatcherService, ingestService.get()); } catch (IOException e) { throw new UncheckedIOException(e); } geoIpDownloaderTaskExecutor = new GeoIpDownloaderTaskExecutor(client, new HttpClient(), clusterService, threadPool); + geoIpDownloaderTaskExecutor.init(); return List.of(databaseRegistry.get(), geoIpDownloaderTaskExecutor); } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsAction.java index a160dfeec9b4..228758e886c6 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsAction.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsAction.java @@ -8,7 +8,7 @@ package org.elasticsearch.ingest.geoip.stats; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.nodes.BaseNodeResponse; @@ -166,7 +166,7 @@ protected NodeResponse(StreamInput in) throws IOException { stats = in.readBoolean() ? new GeoIpDownloaderStats(in) : null; databases = in.readSet(StreamInput::readString); filesInTemp = in.readSet(StreamInput::readString); - configDatabases = in.getVersion().onOrAfter(Version.V_8_0_0) ? in.readSet(StreamInput::readString) : null; + configDatabases = in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0) ? 
in.readSet(StreamInput::readString) : null; } protected NodeResponse( @@ -208,7 +208,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeCollection(databases, StreamOutput::writeString); out.writeCollection(filesInTemp, StreamOutput::writeString); - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { out.writeCollection(configDatabases, StreamOutput::writeString); } } diff --git a/modules/ingest-geoip/src/main/plugin-metadata/plugin-security.policy b/modules/ingest-geoip/src/main/plugin-metadata/plugin-security.policy index 2f1e80e8e557..7002fba5c0c4 100644 --- a/modules/ingest-geoip/src/main/plugin-metadata/plugin-security.policy +++ b/modules/ingest-geoip/src/main/plugin-metadata/plugin-security.policy @@ -7,13 +7,5 @@ */ grant { - // needed because jackson-databind is using Class#getDeclaredConstructors(), Class#getDeclaredMethods() and - // Class#getDeclaredAnnotations() to find all public, private, protected, package protected and - // private constructors, methods or annotations. Just locating all public constructors, methods and annotations - // should be enough, so this permission wouldn't then be needed. Unfortunately this is not what jackson-databind does - // or can be configured to do. - permission java.lang.RuntimePermission "accessDeclaredMembers"; - // Also needed because of jackson-databind: - permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; permission java.net.SocketPermission "*", "connect"; }; diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java index b775ffdb1f06..f2f6bb343f87 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedRunnable; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.query.TermQueryBuilder; @@ -124,8 +125,8 @@ public void setup() throws IOException { ingestService = mock(IngestService.class); clusterService = mock(ClusterService.class); geoIpTmpDir = createTempDir(); - databaseNodeService = new DatabaseNodeService(geoIpTmpDir, client, cache, configDatabases, Runnable::run); - databaseNodeService.initialize("nodeId", resourceWatcherService, ingestService, clusterService); + databaseNodeService = new DatabaseNodeService(geoIpTmpDir, client, cache, configDatabases, Runnable::run, clusterService); + databaseNodeService.initialize("nodeId", resourceWatcherService, ingestService); } @After @@ -150,7 +151,7 @@ public void testCheckDatabases() throws Exception { assertThat(databaseNodeService.getDatabase("GeoIP2-City.mmdb"), nullValue()); // Nothing should be downloaded, since the database is no longer valid (older than 30 days) databaseNodeService.checkDatabases(state); - DatabaseReaderLazyLoader database = databaseNodeService.getDatabase("GeoIP2-City.mmdb"); + DatabaseReaderLazyLoader database = databaseNodeService.getDatabaseReaderLazyLoader("GeoIP2-City.mmdb"); assertThat(database, nullValue()); verify(client, times(0)).search(any()); verify(ingestService, 
times(0)).reloadPipeline(anyString()); @@ -168,7 +169,7 @@ public void testCheckDatabases() throws Exception { // Database should be downloaded databaseNodeService.checkDatabases(state); - database = databaseNodeService.getDatabase("GeoIP2-City.mmdb"); + database = databaseNodeService.getDatabaseReaderLazyLoader("GeoIP2-City.mmdb"); assertThat(database, notNullValue()); verify(client, times(10)).search(any()); try (Stream files = Files.list(geoIpTmpDir.resolve("geoip-databases").resolve("nodeId"))) { @@ -382,7 +383,8 @@ static ClusterState createClusterState(PersistentTasksCustomMetadata tasksCustom new ShardId(index, 0), true, RecoverySource.ExistingStoreRecoverySource.INSTANCE, - new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "") + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, ""), + ShardRouting.Role.DEFAULT ); String nodeId = ESTestCase.randomAlphaOfLength(8); shardRouting = shardRouting.initialize(nodeId, null, shardRouting.getExpectedShardSize()); @@ -410,7 +412,7 @@ private static List gzip(String name, String content, int chunks) throws byte[] header = new byte[512]; byte[] nameBytes = name.getBytes(StandardCharsets.UTF_8); byte[] contentBytes = content.getBytes(StandardCharsets.UTF_8); - byte[] sizeBytes = formatted("%1$012o", contentBytes.length).getBytes(StandardCharsets.UTF_8); + byte[] sizeBytes = Strings.format("%1$012o", contentBytes.length).getBytes(StandardCharsets.UTF_8); System.arraycopy(nameBytes, 0, header, 0, nameBytes.length); System.arraycopy(sizeBytes, 0, header, 124, 12); gzipOutputStream.write(header); diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutorTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutorTests.java new file mode 100644 index 000000000000..5cbe205f5c9c --- /dev/null +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutorTests.java @@ -0,0 +1,253 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.ingest.geoip; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.ingest.IngestMetadata; +import org.elasticsearch.ingest.PipelineConfiguration; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentType; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class GeoIpDownloaderTaskExecutorTests extends ESTestCase { + public void testHasAtLeastOneGeoipProcessor() { + Map configs = new HashMap<>(); + IngestMetadata ingestMetadata = new IngestMetadata(configs); + ClusterState clusterState = mock(ClusterState.class); + Metadata metadata = mock(Metadata.class); + when(metadata.custom(IngestMetadata.TYPE)).thenReturn(ingestMetadata); + when(clusterState.getMetadata()).thenReturn(metadata); + List expectHitsInputs = getPipelinesWithGeoIpProcessors(); + List expectMissesInputs = getPipelinesWithoutGeoIpProcessors(); + { + // Test that hasAtLeastOneGeoipProcessor returns true for any pipeline with a geoip processor: + for (String pipeline : expectHitsInputs) { + configs.clear(); + configs.put("_id1", new PipelineConfiguration("_id1", new BytesArray(pipeline), XContentType.JSON)); + assertTrue(GeoIpDownloaderTaskExecutor.hasAtLeastOneGeoipProcessor(clusterState)); + } + } + { + // Test that hasAtLeastOneGeoipProcessor returns false for any pipeline without a geoip processor: + for (String pipeline : expectMissesInputs) { + configs.clear(); + configs.put("_id1", new PipelineConfiguration("_id1", new BytesArray(pipeline), XContentType.JSON)); + assertFalse(GeoIpDownloaderTaskExecutor.hasAtLeastOneGeoipProcessor(clusterState)); + } + } + { + /* + * Now test that hasAtLeastOneGeoipProcessor returns true for a mix of pipelines, some which have geoip processors and some + * which do not: + */ + configs.clear(); + for (String pipeline : expectHitsInputs) { + String id = randomAlphaOfLength(20); + configs.put(id, new PipelineConfiguration(id, new BytesArray(pipeline), XContentType.JSON)); + } + for (String pipeline : expectMissesInputs) { + String id = randomAlphaOfLength(20); + configs.put(id, new PipelineConfiguration(id, new BytesArray(pipeline), XContentType.JSON)); + } + assertTrue(GeoIpDownloaderTaskExecutor.hasAtLeastOneGeoipProcessor(clusterState)); + } + } + + /* + * This method returns an assorted list of pipelines that have geoip processors -- ones that ought to cause hasAtLeastOneGeoipProcessor + * to return true. 
+ */ + private List getPipelinesWithGeoIpProcessors() { + String simpleGeoIpProcessor = """ + { + "processors":[ + { + "geoip":{ + "field":"provider" + } + } + ] + } + """; + String onFailureWithGeoIpProcessor = """ + { + "processors":[ + { + "rename":{ + "field":"provider", + "target_field":"cloud.provider", + "on_failure":[ + { + "geoip":{ + "field":"error.message" + } + } + ] + } + } + ] + } + """; + String foreachWithGeoIpProcessor = """ + { + "processors":[ + { + "foreach":{ + "field":"values", + "processor": + { + "geoip":{ + "field":"someField" + } + } + } + } + ] + } + """; + String nestedForeachWithGeoIpProcessor = """ + { + "processors":[ + { + "foreach":{ + "field":"values", + "processor": + { + "foreach":{ + "field":"someField", + "processor": + { + "geoip":{ + "field":"someField" + } + } + } + } + } + } + ] + } + """; + String nestedForeachWithOnFailureWithGeoIpProcessor = """ + { + "processors":[ + { + "foreach":{ + "field":"values", + "processor": + { + "foreach":{ + "field":"someField", + "processor": + { + "rename":{ + "field":"provider", + "target_field":"cloud.provider", + "on_failure":[ + { + "geoip":{ + "field":"error.message" + } + } + ] + } + } + } + } + } + } + ] + } + """; + String onFailureWithForeachWithGeoIp = """ + { + "processors":[ + { + "rename":{ + "field":"provider", + "target_field":"cloud.provider", + "on_failure":[ + { + "foreach":{ + "field":"values", + "processor": + { + "geoip":{ + "field":"someField" + } + } + } + } + ] + } + } + ] + } + """; + return List.of( + simpleGeoIpProcessor, + onFailureWithGeoIpProcessor, + foreachWithGeoIpProcessor, + nestedForeachWithGeoIpProcessor, + nestedForeachWithOnFailureWithGeoIpProcessor, + onFailureWithForeachWithGeoIp + ); + } + + /* + * This method returns an assorted list of pipelines that _do not_ have geoip processors -- ones that ought to cause + * hasAtLeastOneGeoipProcessor to return false. 
+ */ + private List getPipelinesWithoutGeoIpProcessors() { + String empty = """ + { + } + """; + String noProcessors = """ + { + "processors":[ + ] + } + """; + String onFailureWithForeachWithSet = """ + { + "processors":[ + { + "rename":{ + "field":"provider", + "target_field":"cloud.provider", + "on_failure":[ + { + "foreach":{ + "field":"values", + "processor": + { + "set":{ + "field":"someField" + } + } + } + } + ] + } + } + ] + } + """; + return List.of(empty, noProcessors, onFailureWithForeachWithSet); + } +} diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java index 16088deb86b3..9f3334a07d8f 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java @@ -53,6 +53,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiConsumer; @@ -81,7 +82,12 @@ public void setup() { when(clusterService.getClusterSettings()).thenReturn( new ClusterSettings( Settings.EMPTY, - Set.of(GeoIpDownloader.ENDPOINT_SETTING, GeoIpDownloader.POLL_INTERVAL_SETTING, GeoIpDownloaderTaskExecutor.ENABLED_SETTING) + Set.of( + GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING, + GeoIpDownloader.ENDPOINT_SETTING, + GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING, + GeoIpDownloaderTaskExecutor.ENABLED_SETTING + ) ) ); ClusterState state = createClusterState(new PersistentTasksCustomMetadata(1L, Map.of())); @@ -98,7 +104,10 @@ public void setup() { "", "", EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + () -> true ); } @@ -252,7 +261,10 @@ public void testProcessDatabaseNew() throws IOException { "", "", EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + () -> true ) { @Override void updateTaskState() { @@ -298,7 +310,10 @@ public void testProcessDatabaseUpdate() throws IOException { "", "", EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + () -> true ) { @Override void updateTaskState() { @@ -346,7 +361,10 @@ public void testProcessDatabaseSame() throws IOException { "", "", EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + () -> true ) { @Override void updateTaskState() { @@ -387,7 +405,10 @@ public void testUpdateTaskState() { "", "", EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + () -> true ) { @Override public void 
updatePersistentTaskState(PersistentTaskState state, ActionListener> listener) { @@ -414,7 +435,10 @@ public void testUpdateTaskStateError() { "", "", EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + () -> true ) { @Override public void updatePersistentTaskState(PersistentTaskState state, ActionListener> listener) { @@ -440,6 +464,7 @@ public void testUpdateDatabases() throws IOException { builder.close(); when(httpClient.getBytes("a.b?elastic_geoip_service_tos=agree")).thenReturn(baos.toByteArray()); Iterator> it = maps.iterator(); + final AtomicBoolean atLeastOneGeoipProcessor = new AtomicBoolean(false); geoIpDownloader = new GeoIpDownloader( client, httpClient, @@ -451,7 +476,10 @@ public void testUpdateDatabases() throws IOException { "", "", EMPTY_TASK_ID, - Collections.emptyMap() + Collections.emptyMap(), + () -> GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING.getDefault(Settings.EMPTY), + () -> GeoIpDownloaderTaskExecutor.EAGER_DOWNLOAD_SETTING.getDefault(Settings.EMPTY), + atLeastOneGeoipProcessor::get ) { @Override void processDatabase(Map databaseInfo) { @@ -459,6 +487,9 @@ void processDatabase(Map databaseInfo) { } }; geoIpDownloader.updateDatabases(); + assertTrue(it.hasNext()); + atLeastOneGeoipProcessor.set(true); + geoIpDownloader.updateDatabases(); assertFalse(it.hasNext()); } diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java index 809e3b833127..d9ee6cda883e 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java @@ -72,10 +72,10 @@ public void loadDatabaseReaders() throws IOException { configDatabases = new ConfigDatabases(geoIpConfigDir, new GeoIpCache(1000)); copyDatabaseFiles(geoIpConfigDir, configDatabases); geoipTmpDir = createTempDir(); - databaseNodeService = new DatabaseNodeService(geoipTmpDir, client, cache, configDatabases, Runnable::run); - databaseNodeService.initialize("nodeId", mock(ResourceWatcherService.class), mock(IngestService.class), mock(ClusterService.class)); clusterService = mock(ClusterService.class); when(clusterService.state()).thenReturn(ClusterState.EMPTY_STATE); + databaseNodeService = new DatabaseNodeService(geoipTmpDir, client, cache, configDatabases, Runnable::run, clusterService); + databaseNodeService.initialize("nodeId", mock(ResourceWatcherService.class), mock(IngestService.class)); } @After @@ -85,7 +85,7 @@ public void closeDatabaseReaders() throws IOException { } public void testBuildDefaults() throws Exception { - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService, clusterService); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); Map config = new HashMap<>(); config.put("field", "_field"); @@ -101,7 +101,7 @@ public void testBuildDefaults() throws Exception { } public void testSetIgnoreMissing() throws Exception { - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService, clusterService); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); Map config = new HashMap<>(); config.put("field", "_field"); @@ -118,7 
+118,7 @@ public void testSetIgnoreMissing() throws Exception { } public void testCountryBuildDefaults() throws Exception { - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService, clusterService); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); Map config = new HashMap<>(); config.put("field", "_field"); @@ -136,7 +136,7 @@ public void testCountryBuildDefaults() throws Exception { } public void testAsnBuildDefaults() throws Exception { - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService, clusterService); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); Map config = new HashMap<>(); config.put("field", "_field"); @@ -154,7 +154,7 @@ public void testAsnBuildDefaults() throws Exception { } public void testBuildTargetField() throws Exception { - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService, clusterService); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); Map config = new HashMap<>(); config.put("field", "_field"); config.put("target_field", "_field"); @@ -165,7 +165,7 @@ public void testBuildTargetField() throws Exception { } public void testBuildDbFile() throws Exception { - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService, clusterService); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); Map config = new HashMap<>(); config.put("field", "_field"); config.put("database_file", "GeoLite2-Country.mmdb"); @@ -178,7 +178,7 @@ public void testBuildDbFile() throws Exception { } public void testBuildWithCountryDbAndAsnFields() throws Exception { - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService, clusterService); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); Map config = new HashMap<>(); config.put("field", "_field"); config.put("database_file", "GeoLite2-Country.mmdb"); @@ -198,7 +198,7 @@ public void testBuildWithCountryDbAndAsnFields() throws Exception { } public void testBuildWithAsnDbAndCityFields() throws Exception { - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService, clusterService); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); Map config = new HashMap<>(); config.put("field", "_field"); config.put("database_file", "GeoLite2-ASN.mmdb"); @@ -219,7 +219,7 @@ public void testBuildNonExistingDbFile() throws Exception { geoipTmpDir.resolve("GeoLite2-City.mmdb") ); databaseNodeService.updateDatabase("GeoLite2-City.mmdb", "md5", geoipTmpDir.resolve("GeoLite2-City.mmdb")); - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService, clusterService); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); Map config = new HashMap<>(); config.put("field", "_field"); @@ -229,7 +229,7 @@ public void testBuildNonExistingDbFile() throws Exception { } public void testBuildBuiltinDatabaseMissing() throws Exception { - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService, clusterService); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); cleanDatabaseFiles(geoIpConfigDir, configDatabases); Map config = new HashMap<>(); @@ -240,7 +240,7 @@ public void testBuildBuiltinDatabaseMissing() throws Exception { } public void testBuildFields() throws Exception { - GeoIpProcessor.Factory factory = new 
GeoIpProcessor.Factory(databaseNodeService, clusterService); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); Set properties = EnumSet.noneOf(GeoIpProcessor.Property.class); List fieldNames = new ArrayList<>(); @@ -264,7 +264,7 @@ public void testBuildFields() throws Exception { } public void testBuildIllegalFieldOption() throws Exception { - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService, clusterService); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); Map config1 = new HashMap<>(); config1.put("field", "_field"); @@ -298,8 +298,15 @@ public void testLazyLoading() throws Exception { // database readers used at class level are reused between tests. (we want to keep that otherwise running this // test will take roughly 4 times more time) Client client = mock(Client.class); - DatabaseNodeService databaseNodeService = new DatabaseNodeService(createTempDir(), client, cache, configDatabases, Runnable::run); - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService, clusterService); + DatabaseNodeService databaseNodeService = new DatabaseNodeService( + createTempDir(), + client, + cache, + configDatabases, + Runnable::run, + clusterService + ); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); for (DatabaseReaderLazyLoader lazyLoader : configDatabases.getConfigDatabases().values()) { assertNull(lazyLoader.databaseReader.get()); } @@ -313,10 +320,10 @@ public void testLazyLoading() throws Exception { final GeoIpProcessor city = (GeoIpProcessor) factory.create(null, "_tag", null, config); // these are lazy loaded until first use so we expect null here - assertNull(databaseNodeService.getDatabase("GeoLite2-City.mmdb").databaseReader.get()); + assertNull(databaseNodeService.getDatabaseReaderLazyLoader("GeoLite2-City.mmdb").databaseReader.get()); city.execute(document); // the first ingest should trigger a database load - assertNotNull(databaseNodeService.getDatabase("GeoLite2-City.mmdb").databaseReader.get()); + assertNotNull(databaseNodeService.getDatabaseReaderLazyLoader("GeoLite2-City.mmdb").databaseReader.get()); config = new HashMap<>(); config.put("field", "_field"); @@ -324,10 +331,10 @@ public void testLazyLoading() throws Exception { final GeoIpProcessor country = (GeoIpProcessor) factory.create(null, "_tag", null, config); // these are lazy loaded until first use so we expect null here - assertNull(databaseNodeService.getDatabase("GeoLite2-Country.mmdb").databaseReader.get()); + assertNull(databaseNodeService.getDatabaseReaderLazyLoader("GeoLite2-Country.mmdb").databaseReader.get()); country.execute(document); // the first ingest should trigger a database load - assertNotNull(databaseNodeService.getDatabase("GeoLite2-Country.mmdb").databaseReader.get()); + assertNotNull(databaseNodeService.getDatabaseReaderLazyLoader("GeoLite2-Country.mmdb").databaseReader.get()); config = new HashMap<>(); config.put("field", "_field"); @@ -335,10 +342,10 @@ public void testLazyLoading() throws Exception { final GeoIpProcessor asn = (GeoIpProcessor) factory.create(null, "_tag", null, config); // these are lazy loaded until first use so we expect null here - assertNull(databaseNodeService.getDatabase("GeoLite2-ASN.mmdb").databaseReader.get()); + assertNull(databaseNodeService.getDatabaseReaderLazyLoader("GeoLite2-ASN.mmdb").databaseReader.get()); asn.execute(document); // the first ingest should trigger a database load - 
assertNotNull(databaseNodeService.getDatabase("GeoLite2-ASN.mmdb").databaseReader.get()); + assertNotNull(databaseNodeService.getDatabaseReaderLazyLoader("GeoLite2-ASN.mmdb").databaseReader.get()); } @SuppressWarnings("HiddenField") @@ -360,9 +367,16 @@ public void testLoadingCustomDatabase() throws IOException { ResourceWatcherService resourceWatcherService = new ResourceWatcherService(Settings.EMPTY, threadPool); Client client = mock(Client.class); GeoIpCache cache = new GeoIpCache(1000); - DatabaseNodeService databaseNodeService = new DatabaseNodeService(createTempDir(), client, cache, configDatabases, Runnable::run); - databaseNodeService.initialize("nodeId", resourceWatcherService, mock(IngestService.class), mock(ClusterService.class)); - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService, clusterService); + DatabaseNodeService databaseNodeService = new DatabaseNodeService( + createTempDir(), + client, + cache, + configDatabases, + Runnable::run, + clusterService + ); + databaseNodeService.initialize("nodeId", resourceWatcherService, mock(IngestService.class)); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); for (DatabaseReaderLazyLoader lazyLoader : configDatabases.getConfigDatabases().values()) { assertNull(lazyLoader.databaseReader.get()); } @@ -376,16 +390,16 @@ public void testLoadingCustomDatabase() throws IOException { final GeoIpProcessor city = (GeoIpProcessor) factory.create(null, "_tag", null, config); // these are lazy loaded until first use so we expect null here - assertNull(databaseNodeService.getDatabase("GeoIP2-City.mmdb").databaseReader.get()); + assertNull(databaseNodeService.getDatabaseReaderLazyLoader("GeoIP2-City.mmdb").databaseReader.get()); city.execute(document); // the first ingest should trigger a database load - assertNotNull(databaseNodeService.getDatabase("GeoIP2-City.mmdb").databaseReader.get()); + assertNotNull(databaseNodeService.getDatabaseReaderLazyLoader("GeoIP2-City.mmdb").databaseReader.get()); resourceWatcherService.close(); threadPool.shutdown(); } public void testFallbackUsingDefaultDatabases() throws Exception { - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService, clusterService); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); Map config = new HashMap<>(); config.put("field", "source_field"); config.put("fallback_to_default_databases", randomBoolean()); @@ -402,7 +416,7 @@ public void testDefaultDatabaseWithTaskPresent() throws Exception { .metadata(Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasks)) .build(); when(clusterService.state()).thenReturn(clusterState); - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService, clusterService); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); Map config = new HashMap<>(); config.put("field", "_field"); @@ -414,7 +428,7 @@ public void testDefaultDatabaseWithTaskPresent() throws Exception { } public void testUpdateDatabaseWhileIngesting() throws Exception { - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService, clusterService); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); Map config = new HashMap<>(); config.put("field", "source_field"); GeoIpProcessor processor = (GeoIpProcessor) factory.create(null, null, null, config); @@ -453,7 +467,7 @@ public void testUpdateDatabaseWhileIngesting() throws Exception { } public void 
testDatabaseNotReadyYet() throws Exception { - GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService, clusterService); + GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); cleanDatabaseFiles(geoIpConfigDir, configDatabases); { diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java index bbefc307e800..80c72238b967 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorTests.java @@ -24,6 +24,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Supplier; import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument; @@ -427,6 +428,35 @@ public void testListNoMatches() throws Exception { assertFalse(ingestDocument.hasField("target_field")); } + public void testListDatabaseReferenceCounting() throws Exception { + AtomicBoolean closeCheck = new AtomicBoolean(false); + var loader = loader("/GeoLite2-City.mmdb", closeCheck); + GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), null, "source_field", () -> { + loader.preLookup(); + return loader; + }, () -> true, "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false, false, "filename"); + + Map document = new HashMap<>(); + document.put("source_field", Arrays.asList("8.8.8.8", "82.171.64.0")); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); + processor.execute(ingestDocument); + + @SuppressWarnings("unchecked") + List> geoData = (List>) ingestDocument.getSourceAndMetadata().get("target_field"); + + Map location = new HashMap<>(); + location.put("lat", 37.751d); + location.put("lon", -97.822d); + assertThat(geoData.get(0).get("location"), equalTo(location)); + + assertThat(geoData.get(1).get("city_name"), equalTo("Hoensbroek")); + + // Check the loader's reference count and attempt to close + assertThat(loader.current(), equalTo(0)); + loader.close(); + assertTrue(closeCheck.get()); + } + public void testListFirstOnly() throws Exception { GeoIpProcessor processor = new GeoIpProcessor( randomAlphaOfLength(10), @@ -545,12 +575,17 @@ public void testNoDatabase_ignoreMissing() throws Exception { assertIngestDocument(originalIngestDocument, ingestDocument); } - private CheckedSupplier loader(final String path) { + private CheckedSupplier loader(final String path) { + var loader = loader(path, null); + return () -> loader; + } + + private DatabaseReaderLazyLoader loader(final String path, final AtomicBoolean closed) { final Supplier databaseInputStreamSupplier = () -> GeoIpProcessor.class.getResourceAsStream(path); final CheckedSupplier loader = () -> new DatabaseReader.Builder(databaseInputStreamSupplier.get()) .build(); final GeoIpCache cache = new GeoIpCache(1000); - DatabaseReaderLazyLoader lazyLoader = new DatabaseReaderLazyLoader(cache, PathUtils.get(path), null, loader) { + return new DatabaseReaderLazyLoader(cache, PathUtils.get(path), null, loader) { @Override long databaseFileSize() throws IOException { @@ -571,8 +606,14 @@ InputStream databaseInputStream() throws IOException { return databaseInputStreamSupplier.get(); } + @Override + protected void doClose() throws IOException { + if (closed != null) { + 
closed.set(true); + } + super.doClose(); + } }; - return () -> lazyLoader; } } diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpTaskStateSerializationTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpTaskStateSerializationTests.java index 137a5160b439..80e4836605e2 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpTaskStateSerializationTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpTaskStateSerializationTests.java @@ -41,4 +41,9 @@ protected GeoIpTaskState createTestInstance() { } return state; } + + @Override + protected GeoIpTaskState mutateInstance(GeoIpTaskState instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } } diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsActionNodeResponseSerializingTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsActionNodeResponseSerializingTests.java index 9e49d33089ea..6f3d84c2ce30 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsActionNodeResponseSerializingTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsActionNodeResponseSerializingTests.java @@ -28,6 +28,11 @@ protected GeoIpDownloaderStatsAction.NodeResponse createTestInstance() { return createRandomInstance(); } + @Override + protected GeoIpDownloaderStatsAction.NodeResponse mutateInstance(GeoIpDownloaderStatsAction.NodeResponse instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } + static GeoIpDownloaderStatsAction.NodeResponse createRandomInstance() { DiscoveryNode node = new DiscoveryNode("id", buildNewFakeTransportAddress(), Version.CURRENT); Set databases = Set.copyOf(randomList(10, () -> randomAlphaOfLengthBetween(5, 10))); diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsActionResponseSerializingTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsActionResponseSerializingTests.java index 756b012b996f..b9fe1ee976d6 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsActionResponseSerializingTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsActionResponseSerializingTests.java @@ -31,4 +31,9 @@ protected GeoIpDownloaderStatsAction.Response createTestInstance() { ); return new GeoIpDownloaderStatsAction.Response(ClusterName.DEFAULT, nodeResponses, Collections.emptyList()); } + + @Override + protected GeoIpDownloaderStatsAction.Response mutateInstance(GeoIpDownloaderStatsAction.Response instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } } diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsSerializingTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsSerializingTests.java index cc6fd1ca903a..68b1ac4b28ff 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsSerializingTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsSerializingTests.java @@ -31,6 +31,11 @@ 
protected GeoIpDownloaderStats createTestInstance() { return createRandomInstance(); } + @Override + protected GeoIpDownloaderStats mutateInstance(GeoIpDownloaderStats instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } + static GeoIpDownloaderStats createRandomInstance() { GeoIpDownloaderStats stats = GeoIpDownloaderStats.EMPTY.databasesCount(randomInt(1000)); int successes = randomInt(20); diff --git a/modules/ingest-geoip/src/yamlRestTest/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java b/modules/ingest-geoip/src/yamlRestTest/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java index 5b40f4a6ada4..8584229ec171 100644 --- a/modules/ingest-geoip/src/yamlRestTest/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java +++ b/modules/ingest-geoip/src/yamlRestTest/java/org/elasticsearch/ingest/geoip/IngestGeoIpClientYamlTestSuiteIT.java @@ -11,11 +11,17 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.apache.http.entity.ByteArrayEntity; +import org.apache.http.entity.ContentType; import org.elasticsearch.client.Request; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.Before; +import java.io.IOException; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -36,6 +42,7 @@ public static Iterable parameters() throws Exception { @Before public void waitForDatabases() throws Exception { + putGeoipPipeline(); assertBusy(() -> { Request request = new Request("GET", "/_ingest/geoip/stats"); Map response = entityAsMap(client().performRequest(request)); @@ -53,4 +60,37 @@ public void waitForDatabases() throws Exception { }); } + /** + * This creates a pipeline with a geoip processor so that the GeoipDownloader will download its databases. 
+ * @throws IOException + */ + private void putGeoipPipeline() throws IOException { + final BytesReference bytes; + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + builder.startObject(); + { + builder.startArray("processors"); + { + builder.startObject(); + { + builder.startObject("geoip"); + { + builder.field("field", "ip"); + builder.field("target_field", "ip-city"); + builder.field("database_file", "GeoLite2-City.mmdb"); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + bytes = BytesReference.bytes(builder); + } + Request putPipelineRequest = new Request("PUT", "/_ingest/pipeline/pipeline-with-geoip"); + putPipelineRequest.setEntity(new ByteArrayEntity(bytes.array(), ContentType.APPLICATION_JSON)); + client().performRequest(putPipelineRequest); + } + } diff --git a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/20_geoip_processor.yml b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/20_geoip_processor.yml index b09dac97eba2..84801cfb9ada 100644 --- a/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/20_geoip_processor.yml +++ b/modules/ingest-geoip/src/yamlRestTest/resources/rest-api-spec/test/ingest_geoip/20_geoip_processor.yml @@ -309,3 +309,39 @@ - match: { _source.geoip.asn: 29518 } - match: { _source.geoip.organization_name: "Bredband2 AB" } - match: { _source.geoip.network: "89.160.0.0/17" } + +--- +"Test simulate with Geoip Processor": +- do: + ingest.put_pipeline: + id: "pipeline1" + body: > + { + "processors": [ + { + "geoip": { + "field": "source.ip", + "target_field": "source.geo" + } + } + ] + } +- match: { acknowledged: true } + +- do: + ingest.simulate: + id: "pipeline1" + body: > + { + "docs": [ + { + "_source": { + "source": { + "ip": "89.160.20.128" + } + } + } + ] + } +- length: { docs: 1 } +- match: { docs.0.doc._source.source.geo.city_name: "Linköping" } diff --git a/modules/ingest-user-agent/build.gradle b/modules/ingest-user-agent/build.gradle index bdf7fae42d84..5d3ae968b787 100644 --- a/modules/ingest-user-agent/build.gradle +++ b/modules/ingest-user-agent/build.gradle @@ -5,8 +5,8 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' esplugin { description 'Ingest processor that extracts information from a user agent' diff --git a/modules/kibana/build.gradle b/modules/kibana/build.gradle index d334fa380d7b..e57e16409495 100644 --- a/modules/kibana/build.gradle +++ b/modules/kibana/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.legacy-java-rest-test' esplugin { description 'Plugin exposing APIs for Kibana system indices' diff --git a/modules/kibana/src/javaRestTest/java/org/elasticsearch/kibana/KibanaSystemIndexIT.java b/modules/kibana/src/javaRestTest/java/org/elasticsearch/kibana/KibanaSystemIndexIT.java index 08b42cc49b90..bb1e32671a84 100644 --- a/modules/kibana/src/javaRestTest/java/org/elasticsearch/kibana/KibanaSystemIndexIT.java +++ b/modules/kibana/src/javaRestTest/java/org/elasticsearch/kibana/KibanaSystemIndexIT.java @@ -16,6 +16,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Strings; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xcontent.json.JsonXContent; @@ -69,7 +70,7 @@ public void testAliases() throws IOException { public void testBulkToKibanaIndex() throws IOException { Request request = request("POST", "/_bulk"); - request.setJsonEntity(formatted(""" + request.setJsonEntity(Strings.format(""" { "index" : { "_index" : "%s", "_id" : "1" } } { "foo" : "bar" } """, indexName)); @@ -79,7 +80,7 @@ public void testBulkToKibanaIndex() throws IOException { public void testRefresh() throws IOException { Request request = request("POST", "/_bulk"); - request.setJsonEntity(formatted(""" + request.setJsonEntity(Strings.format(""" { "index" : { "_index" : "%s", "_id" : "1" } } { "foo" : "bar" } """, indexName)); @@ -100,7 +101,7 @@ public void testRefresh() throws IOException { public void testGetFromKibanaIndex() throws IOException { Request request = request("POST", "/_bulk"); - request.setJsonEntity(formatted(""" + request.setJsonEntity(Strings.format(""" { "index" : { "_index" : "%s", "_id" : "1" } } { "foo" : "bar" } """, indexName)); @@ -119,7 +120,7 @@ public void testGetFromKibanaIndex() throws IOException { public void testMultiGetFromKibanaIndex() throws IOException { Request request = request("POST", "/_bulk"); - request.setJsonEntity(formatted(""" + request.setJsonEntity(Strings.format(""" { "index" : { "_index" : "%s", "_id" : "1" } } { "foo" : "bar" } { "index" : { "_index" : "%s", "_id" : "2" } } @@ -131,7 +132,7 @@ public void testMultiGetFromKibanaIndex() throws IOException { assertThat(response.getStatusLine().getStatusCode(), is(200)); Request getRequest = request("GET", "/_mget"); - getRequest.setJsonEntity(formatted(""" + getRequest.setJsonEntity(Strings.format(""" { "docs": [ { @@ -155,7 +156,7 @@ public void testMultiGetFromKibanaIndex() throws IOException { public void testSearchFromKibanaIndex() throws IOException { Request request = request("POST", "/_bulk"); - request.setJsonEntity(formatted(""" + request.setJsonEntity(Strings.format(""" { "index" : { "_index" : "%s", "_id" : "1" } } { "foo" : "bar" } { "index" : { "_index" : "%s", "_id" : "2" } } @@ -181,7 +182,7 @@ public void testSearchFromKibanaIndex() throws IOException { public void testDeleteFromKibanaIndex() throws IOException { Request request = request("POST", "/_bulk"); - request.setJsonEntity(formatted(""" + request.setJsonEntity(Strings.format(""" { "index" : { "_index" : "%s", "_id" : "1" } } { "foo" : "bar" } { "index" : { "_index" : "%s", "_id" : "2" } } @@ -199,7 +200,7 @@ public void testDeleteFromKibanaIndex() throws IOException { public void testDeleteByQueryFromKibanaIndex() throws IOException { Request request = request("POST", "/_bulk"); - 
request.setJsonEntity(formatted(""" + request.setJsonEntity(Strings.format(""" { "index" : { "_index" : "%s", "_id" : "1" } } { "foo" : "bar" } { "index" : { "_index" : "%s", "_id" : "2" } } @@ -289,7 +290,7 @@ public void testIndexingAndUpdatingDocs() throws IOException { public void testScrollingDocs() throws IOException { Request request = request("POST", "/_bulk"); - request.setJsonEntity(formatted(""" + request.setJsonEntity(Strings.format(""" { "index" : { "_index" : "%s", "_id" : "1" } } { "foo" : "bar" } { "index" : { "_index" : "%s", "_id" : "2" } } diff --git a/modules/lang-expression/build.gradle b/modules/lang-expression/build.gradle index 1581de127427..66c9dcf6a236 100644 --- a/modules/lang-expression/build.gradle +++ b/modules/lang-expression/build.gradle @@ -5,8 +5,8 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { @@ -17,7 +17,7 @@ esplugin { dependencies { api "org.apache.lucene:lucene-expressions:${versions.lucene}" runtimeOnly "org.apache.lucene:lucene-codecs:${versions.lucene}" - runtimeOnly 'org.antlr:antlr4-runtime:4.5.1-1' + runtimeOnly "org.antlr:antlr4-runtime:${versions.antlr4}" runtimeOnly 'org.ow2.asm:asm:7.2' runtimeOnly 'org.ow2.asm:asm-commons:7.2' runtimeOnly 'org.ow2.asm:asm-tree:7.2' diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionFieldScriptTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionFieldScriptTests.java index 6d843e7b8065..e573d32f7b6c 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionFieldScriptTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionFieldScriptTests.java @@ -15,8 +15,9 @@ import org.elasticsearch.script.FieldScript; import org.elasticsearch.script.ScriptException; import org.elasticsearch.search.lookup.SearchLookup; -import org.elasticsearch.search.lookup.SourceLookup; +import org.elasticsearch.search.lookup.Source; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentType; import java.io.IOException; import java.text.ParseException; @@ -53,7 +54,7 @@ public void setUp() throws Exception { lookup = new SearchLookup( field -> field.equals("field") ? 
fieldType : null, (ignored, _lookup, fdt) -> fieldData, - new SourceLookup.ReaderSourceProvider() + (ctx, doc) -> Source.empty(XContentType.JSON) ); } diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionNumberSortScriptTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionNumberSortScriptTests.java index 30e94ed37947..eedd80bf6d29 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionNumberSortScriptTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionNumberSortScriptTests.java @@ -17,8 +17,9 @@ import org.elasticsearch.script.NumberSortScript; import org.elasticsearch.script.ScriptException; import org.elasticsearch.search.lookup.SearchLookup; -import org.elasticsearch.search.lookup.SourceLookup; +import org.elasticsearch.search.lookup.Source; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentType; import java.io.IOException; import java.text.ParseException; @@ -54,7 +55,7 @@ public void setUp() throws Exception { lookup = new SearchLookup( field -> field.equals("field") ? fieldType : null, (ignored, _lookup, fdt) -> fieldData, - new SourceLookup.ReaderSourceProvider() + (ctx, doc) -> Source.empty(XContentType.JSON) ); } diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTermsSetQueryTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTermsSetQueryTests.java index ad547c656f4b..d88b2d67e663 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTermsSetQueryTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTermsSetQueryTests.java @@ -16,8 +16,9 @@ import org.elasticsearch.script.ScriptException; import org.elasticsearch.script.TermsSetQueryScript; import org.elasticsearch.search.lookup.SearchLookup; -import org.elasticsearch.search.lookup.SourceLookup; +import org.elasticsearch.search.lookup.Source; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentType; import java.io.IOException; import java.text.ParseException; @@ -53,7 +54,7 @@ public void setUp() throws Exception { lookup = new SearchLookup( field -> field.equals("field") ? fieldType : null, (ignored, _lookup, fdt) -> fieldData, - new SourceLookup.ReaderSourceProvider() + (ctx, doc) -> Source.empty(XContentType.JSON) ); } diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle index 7536e2f76abf..c36275699e21 100644 --- a/modules/lang-mustache/build.gradle +++ b/modules/lang-mustache/build.gradle @@ -5,9 +5,9 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' -apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-java-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java index 37abffed2fed..6c432c7306d3 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomMustacheFactory.java @@ -14,7 +14,9 @@ import com.github.mustachejava.Mustache; import com.github.mustachejava.MustacheException; import com.github.mustachejava.MustacheVisitor; +import com.github.mustachejava.SafeMustacheFactory; import com.github.mustachejava.TemplateContext; +import com.github.mustachejava.TemplateFunction; import com.github.mustachejava.codes.DefaultMustache; import com.github.mustachejava.codes.IterableCode; import com.github.mustachejava.codes.WriteCode; @@ -34,12 +36,11 @@ import java.util.Map; import java.util.Objects; import java.util.StringJoiner; -import java.util.function.Function; import java.util.function.Supplier; import java.util.regex.Matcher; import java.util.regex.Pattern; -public class CustomMustacheFactory extends DefaultMustacheFactory { +public class CustomMustacheFactory extends SafeMustacheFactory { static final String V7_JSON_MEDIA_TYPE_WITH_CHARSET = "application/json; charset=UTF-8"; static final String JSON_MEDIA_TYPE_WITH_CHARSET = "application/json;charset=utf-8"; static final String JSON_MEDIA_TYPE = "application/json"; @@ -64,7 +65,7 @@ public class CustomMustacheFactory extends DefaultMustacheFactory { private final Encoder encoder; public CustomMustacheFactory(String mediaType) { - super(); + super(Collections.emptySet(), "."); setObjectHandler(new CustomReflectionObjectHandler()); this.encoder = createEncoder(mediaType); } @@ -145,7 +146,7 @@ protected void tag(Writer writer, String tag) throws IOException { writer.write(tc.endChars()); } - protected abstract Function createFunction(Object resolved); + protected abstract TemplateFunction createFunction(Object resolved); /** * At compile time, this function extracts the name of the variable: @@ -188,7 +189,7 @@ static class ToJsonCode extends CustomCode { @Override @SuppressWarnings("unchecked") - protected Function createFunction(Object resolved) { + protected TemplateFunction createFunction(Object resolved) { return s -> { if (resolved == null) { return null; @@ -238,7 +239,7 @@ static class JoinerCode extends CustomCode { } @Override - protected Function createFunction(Object resolved) { + protected TemplateFunction createFunction(Object resolved) { return s -> { if (s == null) { return null; @@ -260,7 +261,7 @@ static boolean match(String variable) { static class CustomJoinerCode extends JoinerCode { - private static final Pattern PATTERN = Pattern.compile("^(?:" + CODE + " delimiter='(.*)')$"); + private static final Pattern PATTERN = Pattern.compile("^" + CODE + " delimiter='(.*)'$"); CustomJoinerCode(TemplateContext tc, DefaultMustacheFactory df, Mustache mustache, String variable) { super(tc, df, mustache, extractDelimiter(variable)); @@ -357,7 +358,7 @@ static class 
UrlEncoder implements Encoder { @Override public void encode(String s, Writer writer) throws IOException { - writer.write(URLEncoder.encode(s, StandardCharsets.UTF_8.name())); + writer.write(URLEncoder.encode(s, StandardCharsets.UTF_8)); } } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomReflectionObjectHandler.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomReflectionObjectHandler.java index d3ec96f68af5..fd2ab43f348e 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomReflectionObjectHandler.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/CustomReflectionObjectHandler.java @@ -10,10 +10,10 @@ import com.github.mustachejava.reflect.ReflectionObjectHandler; -import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.iterable.Iterables; +import java.lang.reflect.AccessibleObject; import java.lang.reflect.Array; import java.util.AbstractMap; import java.util.Collection; @@ -40,6 +40,24 @@ public Object coerce(Object object) { } } + @Override + @SuppressWarnings("rawtypes") + protected AccessibleObject findMember(Class sClass, String name) { + /* + * overriding findMember from BaseObjectHandler (our superclass's superclass) to always return null. + * + * if you trace findMember there, you'll see that it always either returns null or invokes the getMethod + * or getField methods of that class. the last thing that getMethod and getField do is call 'setAccessible' + * but we don't have java.lang.reflect.ReflectPermission/suppressAccessChecks so that will always throw an + * exception. + * + * that is, with the permissions we're running with, it would always return null ('not found!') or throw + * an exception ('found, but you cannot do this!') -- so by overriding to null we're effectively saying + * "you will never find success going down this path, so don't bother trying" + */ + return null; + } + static final class ArrayMap extends AbstractMap implements Iterable { private final Object array; @@ -144,10 +162,4 @@ public Iterator iterator() { return col.iterator(); } } - - @Override - public String stringify(Object object) { - CollectionUtils.ensureNoSelfReferences(object, "CustomReflectionObjectHandler stringify"); - return super.stringify(object); - } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java index 5aa962973b6f..f42648015535 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java @@ -10,7 +10,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.common.Strings; @@ -102,7 +102,7 @@ public String toString() { MultiSearchTemplateResponse(StreamInput in) throws IOException { super(in); items = in.readArray(Item::new, Item[]::new); - if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_0_0)) { tookInMillis = in.readVLong(); } 
else { tookInMillis = -1L; @@ -136,7 +136,7 @@ public TimeValue getTook() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeArray(items); - if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_0_0)) { out.writeVLong(tookInMillis); } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngine.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngine.java index 8abd0d65d58d..243487981c5f 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngine.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngine.java @@ -13,7 +13,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.SpecialPermission; import org.elasticsearch.script.GeneralScriptException; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; @@ -24,8 +23,6 @@ import java.io.Reader; import java.io.StringReader; import java.io.StringWriter; -import java.security.AccessController; -import java.security.PrivilegedAction; import java.util.Collections; import java.util.Map; import java.util.Set; @@ -108,12 +105,7 @@ private class MustacheExecutableScript extends TemplateScript { public String execute() { final StringWriter writer = new StringWriter(); try { - // crazy reflection here - SpecialPermission.check(); - AccessController.doPrivileged((PrivilegedAction) () -> { - template.execute(writer, params); - return null; - }); + template.execute(writer, params); } catch (Exception e) { logger.error(() -> format("Error running %s", template), e); throw new GeneralScriptException("Error running " + template, e); diff --git a/modules/lang-mustache/src/main/plugin-metadata/plugin-security.policy b/modules/lang-mustache/src/main/plugin-metadata/plugin-security.policy deleted file mode 100644 index 2fb8547e7b3f..000000000000 --- a/modules/lang-mustache/src/main/plugin-metadata/plugin-security.policy +++ /dev/null @@ -1,12 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -grant { - // needed to do crazy reflection - permission java.lang.RuntimePermission "accessDeclaredMembers"; -}; diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/CustomMustacheFactoryTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/CustomMustacheFactoryTests.java index ea741e684c07..4845279c4dd8 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/CustomMustacheFactoryTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/CustomMustacheFactoryTests.java @@ -15,8 +15,6 @@ import java.util.Map; -import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonMap; import static org.elasticsearch.script.mustache.CustomMustacheFactory.JSON_MEDIA_TYPE; import static org.elasticsearch.script.mustache.CustomMustacheFactory.PLAIN_TEXT_MEDIA_TYPE; import static org.elasticsearch.script.mustache.CustomMustacheFactory.X_WWW_FORM_URLENCODED_MEDIA_TYPE; @@ -67,31 +65,31 @@ public void testCreateEncoder() { public void testJsonEscapeEncoder() { final ScriptEngine engine = new MustacheScriptEngine(); - final Map params = randomBoolean() ? singletonMap(Script.CONTENT_TYPE_OPTION, JSON_MEDIA_TYPE) : emptyMap(); + final Map params = randomBoolean() ? Map.of(Script.CONTENT_TYPE_OPTION, JSON_MEDIA_TYPE) : Map.of(); TemplateScript.Factory compiled = engine.compile(null, "{\"field\": \"{{value}}\"}", TemplateScript.CONTEXT, params); - TemplateScript executable = compiled.newInstance(singletonMap("value", "a \"value\"")); + TemplateScript executable = compiled.newInstance(Map.of("value", "a \"value\"")); assertThat(executable.execute(), equalTo("{\"field\": \"a \\\"value\\\"\"}")); } public void testDefaultEncoder() { final ScriptEngine engine = new MustacheScriptEngine(); - final Map params = singletonMap(Script.CONTENT_TYPE_OPTION, PLAIN_TEXT_MEDIA_TYPE); + final Map params = Map.of(Script.CONTENT_TYPE_OPTION, PLAIN_TEXT_MEDIA_TYPE); TemplateScript.Factory compiled = engine.compile(null, "{\"field\": \"{{value}}\"}", TemplateScript.CONTEXT, params); - TemplateScript executable = compiled.newInstance(singletonMap("value", "a \"value\"")); + TemplateScript executable = compiled.newInstance(Map.of("value", "a \"value\"")); assertThat(executable.execute(), equalTo("{\"field\": \"a \"value\"\"}")); } public void testUrlEncoder() { final ScriptEngine engine = new MustacheScriptEngine(); - final Map params = singletonMap(Script.CONTENT_TYPE_OPTION, X_WWW_FORM_URLENCODED_MEDIA_TYPE); + final Map params = Map.of(Script.CONTENT_TYPE_OPTION, X_WWW_FORM_URLENCODED_MEDIA_TYPE); TemplateScript.Factory compiled = engine.compile(null, "{\"field\": \"{{value}}\"}", TemplateScript.CONTEXT, params); - TemplateScript executable = compiled.newInstance(singletonMap("value", "tilde~ AND date:[2016 FROM*]")); + TemplateScript executable = compiled.newInstance(Map.of("value", "tilde~ AND date:[2016 FROM*]")); assertThat(executable.execute(), equalTo("{\"field\": \"tilde%7E+AND+date%3A%5B2016+FROM*%5D\"}")); } } diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java index 2420fed44451..812a281d77c1 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java 
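The SafeMustacheFactory switch and the findMember override above mean that templates can no longer reach into Java objects reflectively: top-level values still resolve from the params map and render via toString(), while field- and method-style references come back empty, exactly as the BWC test in the next hunk asserts. A minimal, self-contained sketch of that behavior against plain mustache.java follows; the class and variable names are illustrative only and are not part of this change set.

    import com.github.mustachejava.DefaultMustacheFactory;
    import com.github.mustachejava.Mustache;
    import com.github.mustachejava.reflect.ReflectionObjectHandler;

    import java.io.StringReader;
    import java.io.StringWriter;
    import java.lang.reflect.AccessibleObject;
    import java.util.Map;

    public class NoReflectionSketch {

        static class Obj {
            public final int publicField = 2;

            @Override
            public String toString() {
                return "obj#" + publicField;
            }
        }

        public static void main(String[] args) throws Exception {
            DefaultMustacheFactory factory = new DefaultMustacheFactory();
            // Same idea as the findMember override above: never hand back a Field or
            // Method, so dotted member references simply resolve to "not found".
            factory.setObjectHandler(new ReflectionObjectHandler() {
                @Override
                @SuppressWarnings("rawtypes")
                protected AccessibleObject findMember(Class sClass, String name) {
                    return null;
                }
            });

            Mustache template = factory.compile(new StringReader("{{obj}} / {{obj.publicField}}"), "inline");
            StringWriter out = new StringWriter();
            template.execute(out, Map.of("obj", new Obj())).flush();
            System.out.println(out); // prints "obj#2 / " -- the member reference renders empty
        }
    }

The production code pairs this with SafeMustacheFactory, constructed above with an empty allowed-resource set, which is intended to confine named template and partial lookups as well rather than only disabling reflective member access.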
@@ -18,8 +18,7 @@ import java.io.IOException; import java.io.StringWriter; -import java.util.Collections; -import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.equalTo; @@ -38,7 +37,7 @@ public void setup() { } public void testSimpleParameterReplace() { - Map compileParams = Collections.singletonMap("content_type", "application/json"); + Map compileParams = Map.of("content_type", "application/json"); { String template = """ GET _search @@ -61,8 +60,7 @@ public void testSimpleParameterReplace() { } } }"""; - Map vars = new HashMap<>(); - vars.put("boost_val", "0.3"); + Map vars = Map.of("boost_val", "0.3"); String o = qe.compile(null, template, TemplateScript.CONTEXT, compileParams).newInstance(vars).execute(); assertEquals(""" GET _search @@ -108,9 +106,7 @@ public void testSimpleParameterReplace() { } } }"""; - Map vars = new HashMap<>(); - vars.put("boost_val", "0.3"); - vars.put("body_val", "\"quick brown\""); + Map vars = Map.of("boost_val", "0.3", "body_val", "\"quick brown\""); String o = qe.compile(null, template, TemplateScript.CONTEXT, compileParams).newInstance(vars).execute(); assertEquals(""" GET _search @@ -141,7 +137,7 @@ public void testSimple() throws IOException { {"source":{"match_{{template}}": {}},"params":{"template":"all"}}"""; XContentParser parser = createParser(JsonXContent.jsonXContent, templateString); Script script = Script.parse(parser); - TemplateScript.Factory compiled = qe.compile(null, script.getIdOrCode(), TemplateScript.CONTEXT, Collections.emptyMap()); + TemplateScript.Factory compiled = qe.compile(null, script.getIdOrCode(), TemplateScript.CONTEXT, Map.of()); TemplateScript TemplateScript = compiled.newInstance(script.getParams()); assertThat(TemplateScript.execute(), equalTo("{\"match_all\":{}}")); } @@ -157,11 +153,79 @@ public void testParseTemplateAsSingleStringWithConditionalClause() throws IOExce }"""; XContentParser parser = createParser(JsonXContent.jsonXContent, templateString); Script script = Script.parse(parser); - TemplateScript.Factory compiled = qe.compile(null, script.getIdOrCode(), TemplateScript.CONTEXT, Collections.emptyMap()); + TemplateScript.Factory compiled = qe.compile(null, script.getIdOrCode(), TemplateScript.CONTEXT, Map.of()); TemplateScript TemplateScript = compiled.newInstance(script.getParams()); assertThat(TemplateScript.execute(), equalTo("{ \"match_all\":{} }")); } + private static class TestReflection { + + private final int privateField = 1; + + public final int publicField = 2; + + private int getPrivateMethod() { + return 3; + } + + public int getPublicMethod() { + return 4; + } + + @Override + public String toString() { + return List.of(privateField, publicField, getPrivateMethod(), getPublicMethod()).toString(); + } + } + + /** + * BWC test for some odd reflection edge-cases. It's not really expected that customer code would be exercising this, + * but maybe it's out there! Who knows!? + * + * If we change this, we should *know* that we're changing it. 
+ */ + @SuppressWarnings({ "deprecation", "removal" }) + public void testReflection() { + Map vars = Map.of("obj", new TestReflection()); + + { + // non-reflective access calls toString + String templateString = "{{obj}}"; + String o = qe.compile(null, templateString, TemplateScript.CONTEXT, Map.of()).newInstance(vars).execute(); + assertThat(o, equalTo("[1, 2, 3, 4]")); + } + { + // accessing a field/method that *doesn't* exist will give an empty result + String templateString = "{{obj.missing}}"; + String o = qe.compile(null, templateString, TemplateScript.CONTEXT, Map.of()).newInstance(vars).execute(); + assertThat(o, equalTo("")); + } + { + // accessing a private field that does exist will give an empty result + String templateString = "{{obj.privateField}}"; + String o = qe.compile(null, templateString, TemplateScript.CONTEXT, Map.of()).newInstance(vars).execute(); + assertThat(o, equalTo("")); + } + { + // accessing a private method that does exist will give an empty result + String templateString = "{{obj.privateMethod}}"; + String o = qe.compile(null, templateString, TemplateScript.CONTEXT, Map.of()).newInstance(vars).execute(); + assertThat(o, equalTo("")); + } + { + // accessing a public field that does exist will give an empty result + String templateString = "{{obj.publicField}}"; + String o = qe.compile(null, templateString, TemplateScript.CONTEXT, Map.of()).newInstance(vars).execute(); + assertThat(o, equalTo("")); + } + { + // accessing a public method that does exist will give an empty result + String templateString = "{{obj.publicMethod}}"; + String o = qe.compile(null, templateString, TemplateScript.CONTEXT, Map.of()).newInstance(vars).execute(); + assertThat(o, equalTo("")); + } + } + public void testEscapeJson() throws IOException { { StringWriter writer = new StringWriter(); diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheTests.java index 2c49fe38c56f..e06e69692893 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Strings; import org.elasticsearch.script.ScriptEngine; import org.elasticsearch.script.ScriptException; import org.elasticsearch.script.TemplateScript; @@ -23,7 +24,6 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Set; @@ -150,7 +150,7 @@ public void testSizeAccessForCollectionsAndArrays() throws Exception { data.put("list", randomList); Map vars = new HashMap<>(); vars.put("data", data); - String expectedString = String.format(Locale.ROOT, "%s %s", randomArrayValues.length, randomList.size()); + String expectedString = Strings.format("%s %s", randomArrayValues.length, randomList.size()); assertThat(factory.newInstance(vars).execute(), equalTo(expectedString)); } diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index 17e43a755bed..2dab15f7ddb4 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -9,8 +9,8 @@ import org.elasticsearch.gradle.testclusters.DefaultTestClustersTask; apply plugin: 'elasticsearch.validate-rest-spec' -apply plugin: 
'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' esplugin { description 'An easy, safe and fast scripting language for Elasticsearch' @@ -34,7 +34,7 @@ configurations { } dependencies { - api 'org.antlr:antlr4-runtime:4.5.3' + api "org.antlr:antlr4-runtime:${versions.antlr4}" api 'org.ow2.asm:asm-util:7.2' api 'org.ow2.asm:asm-tree:7.2' api 'org.ow2.asm:asm-commons:7.2' @@ -174,7 +174,7 @@ configurations { } dependencies { - regenerate 'org.antlr:antlr4:4.5.3' + regenerate "org.antlr:antlr4:${versions.antlr4}" } String grammarPath = 'src/main/antlr' @@ -274,7 +274,7 @@ configurations { } dependencies { - regenerate 'org.antlr:antlr4:4.5.3' + regenerate "org.antlr:antlr4:${versions.antlr4}" } String suggestGrammarPath = 'src/main/antlr' diff --git a/modules/lang-painless/src/main/java/module-info.java b/modules/lang-painless/src/main/java/module-info.java index 7196d87e8402..539c2919b81f 100644 --- a/modules/lang-painless/src/main/java/module-info.java +++ b/modules/lang-painless/src/main/java/module-info.java @@ -13,7 +13,7 @@ requires org.elasticsearch.server; requires org.elasticsearch.xcontent; - requires antlr4.runtime; + requires org.antlr.antlr4.runtime; requires org.apache.lucene.core; requires org.objectweb.asm; requires org.objectweb.asm.commons; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java index a1a4f7ea124e..8b45e7d0edd5 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java @@ -8,6 +8,8 @@ package org.elasticsearch.painless; +import org.elasticsearch.core.Strings; +import org.elasticsearch.painless.api.ValueIterator; import org.elasticsearch.painless.lookup.PainlessLookup; import org.elasticsearch.painless.lookup.PainlessLookupUtility; import org.elasticsearch.painless.lookup.PainlessMethod; @@ -24,6 +26,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -46,90 +49,6 @@ */ public final class Def { - // TODO: Once Java has a factory for those in java.lang.invoke.MethodHandles, use it: - - /** Helper class for isolating MethodHandles and methods to get the length of arrays - * (to emulate a "arraystore" bytecode using MethodHandles). 
- * See: https://bugs.openjdk.java.net/browse/JDK-8156915 - */ - @SuppressWarnings("unused") // getArrayLength() methods are are actually used, javac just does not know :) - private static final class ArrayLengthHelper { - private static final MethodHandles.Lookup PRIVATE_METHOD_HANDLES_LOOKUP = MethodHandles.lookup(); - - private static final Map, MethodHandle> ARRAY_TYPE_MH_MAPPING = Collections.unmodifiableMap( - Stream.of( - boolean[].class, - byte[].class, - short[].class, - int[].class, - long[].class, - char[].class, - float[].class, - double[].class, - Object[].class - ).collect(Collectors.toMap(Function.identity(), type -> { - try { - return PRIVATE_METHOD_HANDLES_LOOKUP.findStatic( - PRIVATE_METHOD_HANDLES_LOOKUP.lookupClass(), - "getArrayLength", - MethodType.methodType(int.class, type) - ); - } catch (ReflectiveOperationException e) { - throw new AssertionError(e); - } - })) - ); - - private static final MethodHandle OBJECT_ARRAY_MH = ARRAY_TYPE_MH_MAPPING.get(Object[].class); - - static int getArrayLength(final boolean[] array) { - return array.length; - } - - static int getArrayLength(final byte[] array) { - return array.length; - } - - static int getArrayLength(final short[] array) { - return array.length; - } - - static int getArrayLength(final int[] array) { - return array.length; - } - - static int getArrayLength(final long[] array) { - return array.length; - } - - static int getArrayLength(final char[] array) { - return array.length; - } - - static int getArrayLength(final float[] array) { - return array.length; - } - - static int getArrayLength(final double[] array) { - return array.length; - } - - static int getArrayLength(final Object[] array) { - return array.length; - } - - static MethodHandle arrayLengthGetter(Class arrayType) { - if (arrayType.isArray() == false) { - throw new IllegalArgumentException("type must be an array"); - } - return (ARRAY_TYPE_MH_MAPPING.containsKey(arrayType)) - ? ARRAY_TYPE_MH_MAPPING.get(arrayType) - : OBJECT_ARRAY_MH.asType(OBJECT_ARRAY_MH.type().changeParameterType(0, arrayType)); - } - - private ArrayLengthHelper() {} - } - /** pointer to Map.get(Object) */ private static final MethodHandle MAP_GET; /** pointer to Map.put(Object,Object) */ @@ -138,26 +57,29 @@ private ArrayLengthHelper() {} private static final MethodHandle LIST_GET; /** pointer to List.set(int,Object) */ private static final MethodHandle LIST_SET; - /** pointer to Iterable.iterator() */ - private static final MethodHandle ITERATOR; + /** pointer to new ObjectIterator(Iterable.iterator()) */ + private static final MethodHandle OBJECT_ITERATOR; /** pointer to {@link Def#mapIndexNormalize}. */ private static final MethodHandle MAP_INDEX_NORMALIZE; /** pointer to {@link Def#listIndexNormalize}. 
*/ private static final MethodHandle LIST_INDEX_NORMALIZE; - /** factory for arraylength MethodHandle (intrinsic) from Java 9 (pkg-private for tests) */ - static final MethodHandle JAVA9_ARRAY_LENGTH_MH_FACTORY; + /** factory for arraylength MethodHandle (intrinsic) */ + private static final MethodHandle ARRAY_LENGTH; public static final Map, MethodHandle> DEF_TO_BOXED_TYPE_IMPLICIT_CAST; static { - final MethodHandles.Lookup methodHandlesLookup = MethodHandles.publicLookup(); + final MethodHandles.Lookup methodHandlesLookup = MethodHandles.lookup(); try { MAP_GET = methodHandlesLookup.findVirtual(Map.class, "get", MethodType.methodType(Object.class, Object.class)); MAP_PUT = methodHandlesLookup.findVirtual(Map.class, "put", MethodType.methodType(Object.class, Object.class, Object.class)); LIST_GET = methodHandlesLookup.findVirtual(List.class, "get", MethodType.methodType(Object.class, int.class)); LIST_SET = methodHandlesLookup.findVirtual(List.class, "set", MethodType.methodType(Object.class, int.class, Object.class)); - ITERATOR = methodHandlesLookup.findVirtual(Iterable.class, "iterator", MethodType.methodType(Iterator.class)); + OBJECT_ITERATOR = MethodHandles.filterReturnValue( + methodHandlesLookup.findVirtual(Iterable.class, "iterator", MethodType.methodType(Iterator.class)), + methodHandlesLookup.findConstructor(ObjectIterator.class, MethodType.methodType(void.class, Iterator.class)) + ); MAP_INDEX_NORMALIZE = methodHandlesLookup.findStatic( Def.class, "mapIndexNormalize", @@ -168,23 +90,14 @@ private ArrayLengthHelper() {} "listIndexNormalize", MethodType.methodType(int.class, List.class, int.class) ); - } catch (final ReflectiveOperationException roe) { - throw new AssertionError(roe); - } - - // lookup up the factory for arraylength MethodHandle (intrinsic) from Java 9: - // https://bugs.openjdk.java.net/browse/JDK-8156915 - MethodHandle arrayLengthMHFactory; - try { - arrayLengthMHFactory = methodHandlesLookup.findStatic( + ARRAY_LENGTH = methodHandlesLookup.findStatic( MethodHandles.class, "arrayLength", MethodType.methodType(MethodHandle.class, Class.class) ); - } catch (final ReflectiveOperationException roe) { - arrayLengthMHFactory = null; + } catch (ReflectiveOperationException roe) { + throw new AssertionError(roe); } - JAVA9_ARRAY_LENGTH_MH_FACTORY = arrayLengthMHFactory; Map, MethodHandle> defToBoxedTypeImplicitCast = new HashMap<>(); @@ -232,15 +145,11 @@ static void rethrow(Throwable t) throws T { /** Returns an array length getter MethodHandle for the given array type */ static MethodHandle arrayLengthGetter(Class arrayType) { - if (JAVA9_ARRAY_LENGTH_MH_FACTORY != null) { - try { - return (MethodHandle) JAVA9_ARRAY_LENGTH_MH_FACTORY.invokeExact(arrayType); - } catch (Throwable t) { - rethrow(t); - throw new AssertionError(t); - } - } else { - return ArrayLengthHelper.arrayLengthGetter(arrayType); + try { + return (MethodHandle) ARRAY_LENGTH.invokeExact(arrayType); + } catch (Throwable t) { + rethrow(t); + throw new AssertionError(t); } } @@ -658,9 +567,129 @@ static MethodHandle lookupArrayLoad(Class receiverClass) { ); } + private static ClassCastException castException(Class sourceClass, Class targetClass, Boolean implicit) { + return new ClassCastException( + Strings.format( + "cannot %scast def [%s] to %s", + implicit != null ? (implicit ? 
"implicitly " : "explicitly ") : "", + PainlessLookupUtility.typeToUnboxedType(sourceClass).getCanonicalName(), + targetClass.getCanonicalName() + ) + ); + } + + private abstract static class BaseIterator implements ValueIterator { + @Override + public boolean nextBoolean() { + Object next = next(); + try { + return (boolean) next; + } catch (ClassCastException e) { + throw castException(next.getClass(), boolean.class, null); + } + } + + @Override + public byte nextByte() { + Object next = next(); + try { + return ((Number) next).byteValue(); + } catch (ClassCastException e) { + throw castException(next.getClass(), byte.class, null); + } + } + + @Override + public short nextShort() { + Object next = next(); + try { + return ((Number) next).shortValue(); + } catch (ClassCastException e) { + throw castException(next.getClass(), short.class, null); + } + } + + @Override + public char nextChar() { + Object next = next(); + try { + return (char) next; + } catch (ClassCastException e) { + throw castException(next.getClass(), char.class, null); + } + } + + @Override + public int nextInt() { + Object next = next(); + try { + return ((Number) next).intValue(); + } catch (ClassCastException e) { + throw castException(next.getClass(), int.class, null); + } + } + + @Override + public long nextLong() { + Object next = next(); + try { + return ((Number) next).longValue(); + } catch (ClassCastException e) { + throw castException(next.getClass(), long.class, null); + } + } + + @Override + public float nextFloat() { + Object next = next(); + try { + return ((Number) next).floatValue(); + } catch (ClassCastException e) { + throw castException(next.getClass(), float.class, null); + } + } + + @Override + public double nextDouble() { + Object next = next(); + try { + return ((Number) next).doubleValue(); + } catch (ClassCastException e) { + throw castException(next.getClass(), double.class, null); + } + } + } + + private static class ObjectIterator extends BaseIterator { + private final Iterator iterator; + + ObjectIterator(Iterator iterator) { + this.iterator = iterator; + } + + @Override + public boolean hasNext() { + return iterator.hasNext(); + } + + @Override + public T next() { + return iterator.next(); + } + + @Override + public void remove() { + iterator.remove(); + } + + @Override + public void forEachRemaining(Consumer action) { + iterator.forEachRemaining(action); + } + } + /** Helper class for isolating MethodHandles and methods to get iterators over arrays - * (to emulate "enhanced for loop" using MethodHandles). These cause boxing, and are not as efficient - * as they could be, but works. + * (to emulate "enhanced for loop" using MethodHandles). 
*/ @SuppressWarnings("unused") // iterator() methods are are actually used, javac just does not know :) private static final class ArrayIteratorHelper { @@ -682,7 +711,7 @@ private static final class ArrayIteratorHelper { return PRIVATE_METHOD_HANDLES_LOOKUP.findStatic( PRIVATE_METHOD_HANDLES_LOOKUP.lookupClass(), "iterator", - MethodType.methodType(Iterator.class, type) + MethodType.methodType(ValueIterator.class, type) ); } catch (ReflectiveOperationException e) { throw new AssertionError(e); @@ -692,8 +721,8 @@ private static final class ArrayIteratorHelper { private static final MethodHandle OBJECT_ARRAY_MH = ARRAY_TYPE_MH_MAPPING.get(Object[].class); - static Iterator iterator(final boolean[] array) { - return new Iterator() { + static ValueIterator iterator(final boolean[] array) { + return new BaseIterator() { int index = 0; @Override @@ -702,14 +731,19 @@ public boolean hasNext() { } @Override - public Boolean next() { + public boolean nextBoolean() { return array[index++]; } + + @Override + public Boolean next() { + return nextBoolean(); + } }; } - static Iterator iterator(final byte[] array) { - return new Iterator() { + static ValueIterator iterator(final byte[] array) { + return new BaseIterator() { int index = 0; @Override @@ -718,14 +752,49 @@ public boolean hasNext() { } @Override - public Byte next() { + public byte nextByte() { return array[index++]; } + + @Override + public short nextShort() { + return nextByte(); + } + + @Override + public char nextChar() { + return (char) nextByte(); + } + + @Override + public int nextInt() { + return nextByte(); + } + + @Override + public long nextLong() { + return nextByte(); + } + + @Override + public float nextFloat() { + return nextByte(); + } + + @Override + public double nextDouble() { + return nextByte(); + } + + @Override + public Byte next() { + return nextByte(); + } }; } - static Iterator iterator(final short[] array) { - return new Iterator() { + static ValueIterator iterator(final short[] array) { + return new BaseIterator() { int index = 0; @Override @@ -734,14 +803,49 @@ public boolean hasNext() { } @Override - public Short next() { + public byte nextByte() { + return (byte) nextShort(); + } + + @Override + public short nextShort() { return array[index++]; } + + @Override + public char nextChar() { + return (char) nextShort(); + } + + @Override + public int nextInt() { + return nextShort(); + } + + @Override + public long nextLong() { + return nextShort(); + } + + @Override + public float nextFloat() { + return nextShort(); + } + + @Override + public double nextDouble() { + return nextShort(); + } + + @Override + public Short next() { + return nextShort(); + } }; } - static Iterator iterator(final int[] array) { - return new Iterator() { + static ValueIterator iterator(final int[] array) { + return new BaseIterator() { int index = 0; @Override @@ -750,14 +854,49 @@ public boolean hasNext() { } @Override - public Integer next() { + public byte nextByte() { + return (byte) nextInt(); + } + + @Override + public short nextShort() { + return (short) nextInt(); + } + + @Override + public char nextChar() { + return (char) nextInt(); + } + + @Override + public int nextInt() { return array[index++]; } + + @Override + public long nextLong() { + return nextInt(); + } + + @Override + public float nextFloat() { + return nextInt(); + } + + @Override + public double nextDouble() { + return nextInt(); + } + + @Override + public Integer next() { + return nextInt(); + } }; } - static Iterator iterator(final long[] array) { - return new 
Iterator() { + static ValueIterator iterator(final long[] array) { + return new BaseIterator() { int index = 0; @Override @@ -766,14 +905,49 @@ public boolean hasNext() { } @Override - public Long next() { + public byte nextByte() { + return (byte) nextLong(); + } + + @Override + public short nextShort() { + return (short) nextLong(); + } + + @Override + public char nextChar() { + return (char) nextLong(); + } + + @Override + public int nextInt() { + return (int) nextLong(); + } + + @Override + public long nextLong() { return array[index++]; } + + @Override + public float nextFloat() { + return nextLong(); + } + + @Override + public double nextDouble() { + return nextLong(); + } + + @Override + public Long next() { + return nextLong(); + } }; } - static Iterator iterator(final char[] array) { - return new Iterator() { + static ValueIterator iterator(final char[] array) { + return new BaseIterator() { int index = 0; @Override @@ -782,14 +956,49 @@ public boolean hasNext() { } @Override - public Character next() { + public byte nextByte() { + return (byte) nextChar(); + } + + @Override + public short nextShort() { + return (short) nextChar(); + } + + @Override + public char nextChar() { return array[index++]; } + + @Override + public int nextInt() { + return nextChar(); + } + + @Override + public long nextLong() { + return nextChar(); + } + + @Override + public float nextFloat() { + return nextChar(); + } + + @Override + public double nextDouble() { + return nextChar(); + } + + @Override + public Character next() { + return nextChar(); + } }; } - static Iterator iterator(final float[] array) { - return new Iterator() { + static ValueIterator iterator(final float[] array) { + return new BaseIterator() { int index = 0; @Override @@ -798,14 +1007,49 @@ public boolean hasNext() { } @Override - public Float next() { + public byte nextByte() { + return (byte) nextFloat(); + } + + @Override + public short nextShort() { + return (short) nextFloat(); + } + + @Override + public char nextChar() { + return (char) nextFloat(); + } + + @Override + public int nextInt() { + return (int) nextFloat(); + } + + @Override + public long nextLong() { + return (long) nextFloat(); + } + + @Override + public float nextFloat() { return array[index++]; } + + @Override + public double nextDouble() { + return nextFloat(); + } + + @Override + public Float next() { + return nextFloat(); + } }; } - static Iterator iterator(final double[] array) { - return new Iterator() { + static ValueIterator iterator(final double[] array) { + return new BaseIterator() { int index = 0; @Override @@ -814,14 +1058,49 @@ public boolean hasNext() { } @Override - public Double next() { + public byte nextByte() { + return (byte) nextDouble(); + } + + @Override + public short nextShort() { + return (short) nextDouble(); + } + + @Override + public char nextChar() { + return (char) nextDouble(); + } + + @Override + public int nextInt() { + return (int) nextDouble(); + } + + @Override + public long nextLong() { + return (long) nextDouble(); + } + + @Override + public float nextFloat() { + return (float) nextDouble(); + } + + @Override + public double nextDouble() { return array[index++]; } + + @Override + public Double next() { + return nextDouble(); + } }; } - static Iterator iterator(final Object[] array) { - return new Iterator() { + static ValueIterator iterator(final Object[] array) { + return new BaseIterator() { int index = 0; @Override @@ -840,9 +1119,8 @@ static MethodHandle newIterator(Class arrayType) { if (arrayType.isArray() == false) 
{ throw new IllegalArgumentException("type must be an array"); } - return (ARRAY_TYPE_MH_MAPPING.containsKey(arrayType)) - ? ARRAY_TYPE_MH_MAPPING.get(arrayType) - : OBJECT_ARRAY_MH.asType(OBJECT_ARRAY_MH.type().changeParameterType(0, arrayType)); + MethodHandle iterator = ARRAY_TYPE_MH_MAPPING.get(arrayType); + return iterator != null ? iterator : OBJECT_ARRAY_MH.asType(OBJECT_ARRAY_MH.type().changeParameterType(0, arrayType)); } private ArrayIteratorHelper() {} @@ -855,7 +1133,7 @@ private ArrayIteratorHelper() {} */ static MethodHandle lookupIterator(Class receiverClass) { if (Iterable.class.isAssignableFrom(receiverClass)) { - return ITERATOR; + return OBJECT_ITERATOR; } else if (receiverClass.isArray()) { return ArrayIteratorHelper.newIterator(receiverClass); } else { @@ -869,13 +1147,7 @@ public static boolean defToboolean(final Object value) { if (value instanceof Boolean) { return (boolean) value; } else { - throw new ClassCastException( - "cannot cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + boolean.class.getCanonicalName() - ); + throw castException(value.getClass(), boolean.class, null); } } @@ -883,13 +1155,7 @@ public static byte defTobyteImplicit(final Object value) { if (value instanceof Byte) { return (byte) value; } else { - throw new ClassCastException( - "cannot implicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + byte.class.getCanonicalName() - ); + throw castException(value.getClass(), byte.class, true); } } @@ -899,13 +1165,7 @@ public static short defToshortImplicit(final Object value) { } else if (value instanceof Short) { return (short) value; } else { - throw new ClassCastException( - "cannot implicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + short.class.getCanonicalName() - ); + throw castException(value.getClass(), short.class, true); } } @@ -913,13 +1173,7 @@ public static char defTocharImplicit(final Object value) { if (value instanceof Character) { return (char) value; } else { - throw new ClassCastException( - "cannot implicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + char.class.getCanonicalName() - ); + throw castException(value.getClass(), char.class, true); } } @@ -933,13 +1187,7 @@ public static int defTointImplicit(final Object value) { } else if (value instanceof Integer) { return (int) value; } else { - throw new ClassCastException( - "cannot implicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + int.class.getCanonicalName() - ); + throw castException(value.getClass(), int.class, true); } } @@ -955,13 +1203,7 @@ public static long defTolongImplicit(final Object value) { } else if (value instanceof Long) { return (long) value; } else { - throw new ClassCastException( - "cannot implicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + long.class.getCanonicalName() - ); + throw castException(value.getClass(), long.class, true); } } @@ -979,13 +1221,7 @@ public static float defTofloatImplicit(final Object value) { } else if (value instanceof Float) { return (float) value; } else { - throw new ClassCastException( - "cannot implicitly cast " - + "def [" - + 
PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + float.class.getCanonicalName() - ); + throw castException(value.getClass(), float.class, true); } } @@ -1005,13 +1241,7 @@ public static double defTodoubleImplicit(final Object value) { } else if (value instanceof Double) { return (double) value; } else { - throw new ClassCastException( - "cannot implicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + double.class.getCanonicalName() - ); + throw castException(value.getClass(), double.class, true); } } @@ -1026,13 +1256,7 @@ public static byte defTobyteExplicit(final Object value) { || value instanceof Double) { return ((Number) value).byteValue(); } else { - throw new ClassCastException( - "cannot explicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + byte.class.getCanonicalName() - ); + throw castException(value.getClass(), byte.class, false); } } @@ -1047,13 +1271,7 @@ public static short defToshortExplicit(final Object value) { || value instanceof Double) { return ((Number) value).shortValue(); } else { - throw new ClassCastException( - "cannot explicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + short.class.getCanonicalName() - ); + throw castException(value.getClass(), short.class, false); } } @@ -1070,13 +1288,7 @@ public static char defTocharExplicit(final Object value) { || value instanceof Double) { return (char) ((Number) value).intValue(); } else { - throw new ClassCastException( - "cannot explicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + char.class.getCanonicalName() - ); + throw castException(value.getClass(), char.class, false); } } @@ -1091,13 +1303,7 @@ public static int defTointExplicit(final Object value) { || value instanceof Double) { return ((Number) value).intValue(); } else { - throw new ClassCastException( - "cannot explicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + int.class.getCanonicalName() - ); + throw castException(value.getClass(), int.class, false); } } @@ -1112,13 +1318,7 @@ public static long defTolongExplicit(final Object value) { || value instanceof Double) { return ((Number) value).longValue(); } else { - throw new ClassCastException( - "cannot explicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + long.class.getCanonicalName() - ); + throw castException(value.getClass(), long.class, false); } } @@ -1133,13 +1333,7 @@ public static float defTofloatExplicit(final Object value) { || value instanceof Double) { return ((Number) value).floatValue(); } else { - throw new ClassCastException( - "cannot explicitly cast " - + "float [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + byte.class.getCanonicalName() - ); + throw castException(value.getClass(), float.class, false); } } @@ -1154,13 +1348,7 @@ public static double defTodoubleExplicit(final Object value) { || value instanceof Double) { return ((Number) value).doubleValue(); } else { - throw new ClassCastException( - "cannot explicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + byte.class.getCanonicalName() - 
); + throw castException(value.getClass(), byte.class, false); } } @@ -1172,13 +1360,7 @@ public static Boolean defToBoolean(final Object value) { } else if (value instanceof Boolean) { return (Boolean) value; } else { - throw new ClassCastException( - "cannot implicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + Boolean.class.getCanonicalName() - ); + throw castException(value.getClass(), Boolean.class, false); } } @@ -1188,13 +1370,7 @@ public static Byte defToByteImplicit(final Object value) { } else if (value instanceof Byte) { return (Byte) value; } else { - throw new ClassCastException( - "cannot implicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + Byte.class.getCanonicalName() - ); + throw castException(value.getClass(), Byte.class, false); } } @@ -1206,13 +1382,7 @@ public static Short defToShortImplicit(final Object value) { } else if (value instanceof Short) { return (Short) value; } else { - throw new ClassCastException( - "cannot implicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + Short.class.getCanonicalName() - ); + throw castException(value.getClass(), Short.class, false); } } @@ -1222,13 +1392,7 @@ public static Character defToCharacterImplicit(final Object value) { } else if (value instanceof Character) { return (Character) value; } else { - throw new ClassCastException( - "cannot implicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + Character.class.getCanonicalName() - ); + throw castException(value.getClass(), Character.class, false); } } @@ -1244,13 +1408,7 @@ public static Integer defToIntegerImplicit(final Object value) { } else if (value instanceof Integer) { return (Integer) value; } else { - throw new ClassCastException( - "cannot implicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + Integer.class.getCanonicalName() - ); + throw castException(value.getClass(), Integer.class, false); } } @@ -1268,13 +1426,7 @@ public static Long defToLongImplicit(final Object value) { } else if (value instanceof Long) { return (Long) value; } else { - throw new ClassCastException( - "cannot implicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + Long.class.getCanonicalName() - ); + throw castException(value.getClass(), Long.class, false); } } @@ -1294,13 +1446,7 @@ public static Float defToFloatImplicit(final Object value) { } else if (value instanceof Float) { return (Float) value; } else { - throw new ClassCastException( - "cannot implicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + Float.class.getCanonicalName() - ); + throw castException(value.getClass(), Float.class, false); } } @@ -1322,13 +1468,7 @@ public static Double defToDoubleImplicit(final Object value) { } else if (value instanceof Double) { return (Double) value; } else { - throw new ClassCastException( - "cannot implicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + Double.class.getCanonicalName() - ); + throw castException(value.getClass(), Double.class, false); } } @@ -1345,13 +1485,7 @@ public static Byte defToByteExplicit(final Object 
value) { || value instanceof Double) { return ((Number) value).byteValue(); } else { - throw new ClassCastException( - "cannot explicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + Byte.class.getCanonicalName() - ); + throw castException(value.getClass(), Byte.class, false); } } @@ -1368,13 +1502,7 @@ public static Short defToShortExplicit(final Object value) { || value instanceof Double) { return ((Number) value).shortValue(); } else { - throw new ClassCastException( - "cannot explicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + Short.class.getCanonicalName() - ); + throw castException(value.getClass(), Short.class, false); } } @@ -1393,13 +1521,7 @@ public static Character defToCharacterExplicit(final Object value) { || value instanceof Double) { return (char) ((Number) value).intValue(); } else { - throw new ClassCastException( - "cannot explicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + Character.class.getCanonicalName() - ); + throw castException(value.getClass(), Character.class, false); } } @@ -1416,13 +1538,7 @@ public static Integer defToIntegerExplicit(final Object value) { || value instanceof Double) { return ((Number) value).intValue(); } else { - throw new ClassCastException( - "cannot explicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + Integer.class.getCanonicalName() - ); + throw castException(value.getClass(), Integer.class, false); } } @@ -1439,13 +1555,7 @@ public static Long defToLongExplicit(final Object value) { || value instanceof Double) { return ((Number) value).longValue(); } else { - throw new ClassCastException( - "cannot explicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + Long.class.getCanonicalName() - ); + throw castException(value.getClass(), Long.class, false); } } @@ -1462,13 +1572,7 @@ public static Float defToFloatExplicit(final Object value) { || value instanceof Double) { return ((Number) value).floatValue(); } else { - throw new ClassCastException( - "cannot explicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + Float.class.getCanonicalName() - ); + throw castException(value.getClass(), Float.class, false); } } @@ -1485,13 +1589,7 @@ public static Double defToDoubleExplicit(final Object value) { || value instanceof Double) { return ((Number) value).doubleValue(); } else { - throw new ClassCastException( - "cannot explicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + Double.class.getCanonicalName() - ); + throw castException(value.getClass(), Double.class, false); } } @@ -1501,13 +1599,7 @@ public static String defToStringImplicit(final Object value) { } else if (value instanceof String) { return (String) value; } else { - throw new ClassCastException( - "cannot implicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + String.class.getCanonicalName() - ); + throw castException(value.getClass(), String.class, true); } } @@ -1519,13 +1611,7 @@ public static String defToStringExplicit(final Object value) { } else if (value instanceof String) { return (String) value; } else { - 
throw new ClassCastException( - "cannot explicitly cast " - + "def [" - + PainlessLookupUtility.typeToUnboxedType(value.getClass()).getCanonicalName() - + "] to " - + String.class.getCanonicalName() - ); + throw castException(value.getClass(), String.class, false); } } @@ -1616,9 +1702,8 @@ static MethodHandle arrayIndexNormalizer(Class arrayType) { if (arrayType.isArray() == false) { throw new IllegalArgumentException("type must be an array"); } - return (ARRAY_TYPE_MH_MAPPING.containsKey(arrayType)) - ? ARRAY_TYPE_MH_MAPPING.get(arrayType) - : OBJECT_ARRAY_MH.asType(OBJECT_ARRAY_MH.type().changeParameterType(0, arrayType)); + MethodHandle handle = ARRAY_TYPE_MH_MAPPING.get(arrayType); + return handle != null ? handle : OBJECT_ARRAY_MH.asType(OBJECT_ARRAY_MH.type().changeParameterType(0, arrayType)); } private ArrayIndexNormalizeHelper() {} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefBootstrap.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefBootstrap.java index 135673010c6f..3d35c5d0ab5e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefBootstrap.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefBootstrap.java @@ -382,13 +382,13 @@ Object fallback(Object[] args) throws Throwable { } else if (type.parameterType(0) != Object.class) { // case 2: only the argument is unknown, just check that MethodType testType = MethodType.methodType(boolean.class, type); - MethodHandle unaryTest = CHECK_RHS.bindTo(clazz0).bindTo(clazz1); - test = unaryTest.asType(testType); + MethodHandle unaryTest = CHECK_RHS.bindTo(clazz1); + test = MethodHandles.dropArguments(unaryTest, 0, Object.class).asType(testType); nullCheck = MethodHandles.dropArguments(NON_NULL, 0, clazz0).asType(testType); } else { // case 3: check both receiver and argument MethodType testType = MethodType.methodType(boolean.class, type); - MethodHandle binaryTest = CHECK_BOTH.bindTo(clazz0).bindTo(clazz1); + MethodHandle binaryTest = MethodHandles.insertArguments(CHECK_BOTH, 0, clazz0, clazz1); test = binaryTest.asType(testType); nullCheck = BOTH_NON_NULL.asType(testType); } @@ -423,7 +423,7 @@ static boolean checkLHS(Class clazz, Object leftObject) { * guard method for inline caching: checks the first argument is the same * as the cached first argument. 
*/ - static boolean checkRHS(Class left, Class right, Object leftObject, Object rightObject) { + static boolean checkRHS(Class right, Object rightObject) { return rightObject.getClass() == right; } @@ -460,7 +460,7 @@ static boolean bothNonNull(Object leftObject, Object rightObject) { CHECK_RHS = methodHandlesLookup.findStatic( methodHandlesLookup.lookupClass(), "checkRHS", - MethodType.methodType(boolean.class, Class.class, Class.class, Object.class, Object.class) + MethodType.methodType(boolean.class, Class.class, Object.class) ); CHECK_BOTH = methodHandlesLookup.findStatic( methodHandlesLookup.lookupClass(), diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefMath.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefMath.java index 0a29ff80a45b..1132b1163565 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefMath.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/DefMath.java @@ -526,8 +526,12 @@ private static boolean eq(boolean a, boolean b) { } private static boolean eq(Object left, Object right) { - if (left != null && right != null) { - if (left instanceof Double) { + if (left == right) { + return true; + } else if (left != null && right != null) { + if (left.getClass() == right.getClass()) { + return left.equals(right); + } else if (left instanceof Double) { if (right instanceof Number) { return (double) left == ((Number) right).doubleValue(); } else if (right instanceof Character) { @@ -537,7 +541,7 @@ private static boolean eq(Object left, Object right) { if (left instanceof Number) { return ((Number) left).doubleValue() == (double) right; } else if (left instanceof Character) { - return (char) left == ((Number) right).doubleValue(); + return (char) left == (double) right; } } else if (left instanceof Float) { if (right instanceof Number) { @@ -549,7 +553,7 @@ private static boolean eq(Object left, Object right) { if (left instanceof Number) { return ((Number) left).floatValue() == (float) right; } else if (left instanceof Character) { - return (char) left == ((Number) right).floatValue(); + return (char) left == (float) right; } } else if (left instanceof Long) { if (right instanceof Number) { @@ -561,7 +565,7 @@ private static boolean eq(Object left, Object right) { if (left instanceof Number) { return ((Number) left).longValue() == (long) right; } else if (left instanceof Character) { - return (char) left == ((Number) right).longValue(); + return (char) left == (long) right; } } else if (left instanceof Number) { if (right instanceof Number) { @@ -578,7 +582,7 @@ private static boolean eq(Object left, Object right) { return left.equals(right); } - return left == null && right == null; + return false; } // comparison operators: applicable for any numeric type diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java index 816825a7700d..4da523bce634 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java @@ -8,6 +8,7 @@ package org.elasticsearch.painless; +import org.elasticsearch.core.Strings; import org.elasticsearch.painless.lookup.PainlessConstructor; import org.elasticsearch.painless.lookup.PainlessLookup; import org.elasticsearch.painless.lookup.PainlessLookupUtility; @@ -22,7 +23,7 @@ import java.util.List; import java.util.Map; import 
java.util.Objects; -import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.elasticsearch.painless.WriterConstants.CLASS_NAME; import static org.objectweb.asm.Opcodes.H_INVOKEINTERFACE; @@ -73,14 +74,12 @@ public static FunctionRef create( if (interfaceMethod == null) { throw new IllegalArgumentException( - "cannot convert function reference [" - + typeName - + "::" - + methodName - + "] " - + "to a non-functional interface [" - + targetClassName - + "]" + Strings.format( + "cannot convert function reference [%s::%s] to a non-functional interface [%s]", + typeName, + methodName, + targetClassName + ) ); } @@ -110,18 +109,14 @@ public static FunctionRef create( if (localFunction == null) { throw new IllegalArgumentException( - "function reference [this::" - + localFunctionKey - + "] " - + "matching [" - + targetClassName - + ", " - + interfaceMethodName - + "/" - + interfaceTypeParametersSize - + "] " - + "not found" - + (localFunctionKey.contains("$") ? " due to an incorrect number of arguments" : "") + Strings.format( + "function reference [this::%s] matching [%s, %s/%d] not found%s", + localFunctionKey, + targetClassName, + interfaceMethodName, + interfaceTypeParametersSize, + localFunctionKey.contains("$") ? " due to an incorrect number of arguments" : "" + ) ); } @@ -144,19 +139,14 @@ public static FunctionRef create( if (painlessConstructor == null) { throw new IllegalArgumentException( - "function reference [" - + typeName - + "::new/" - + interfaceTypeParametersSize - + "] " - + "matching [" - + targetClassName - + ", " - + interfaceMethodName - + "/" - + interfaceTypeParametersSize - + "] " - + "not found" + Strings.format( + "function reference [%s::new/%d] matching [%s, %s/%d] not found", + typeName, + interfaceTypeParametersSize, + targetClassName, + interfaceMethodName, + interfaceTypeParametersSize + ) ); } @@ -193,35 +183,25 @@ public static FunctionRef create( if (painlessMethod == null) { throw new IllegalArgumentException( - "function reference " - + "[" - + typeName - + "::" - + methodName - + "/" - + interfaceTypeParametersSize - + "] " - + "matching [" - + targetClassName - + ", " - + interfaceMethodName - + "/" - + interfaceTypeParametersSize - + "] " - + "not found" + Strings.format( + "function reference [%s::%s/%d] matching [%s, %s/%d] not found", + typeName, + methodName, + interfaceTypeParametersSize, + targetClassName, + interfaceMethodName, + interfaceTypeParametersSize + ) ); } } else if (captured) { throw new IllegalArgumentException( - "cannot use a static method as a function reference " - + "[" - + typeName - + "::" - + methodName - + "/" - + interfaceTypeParametersSize - + "] " - + "with a non-static captured variable" + Strings.format( + "cannot use a static method as a function reference [%s::%s/%d] with a non-static captured variable", + typeName, + methodName, + interfaceTypeParametersSize + ) ); } @@ -360,19 +340,20 @@ public String getFactoryMethodDescriptor() { if (factoryMethodReceiver == null) { return factoryMethodType.toMethodDescriptorString(); } - List arguments = factoryMethodType.parameterList().stream().map(Type::getType).collect(Collectors.toList()); - arguments.add(0, factoryMethodReceiver); - Type[] argArray = new Type[arguments.size()]; - arguments.toArray(argArray); - return Type.getMethodDescriptor(Type.getType(factoryMethodType.returnType()), argArray); + Type[] arguments = Stream.concat(Stream.of(factoryMethodReceiver), factoryMethodType.parameterList().stream().map(Type::getType)) + 
.toArray(Type[]::new); + return Type.getMethodDescriptor(Type.getType(factoryMethodType.returnType()), arguments); } /** Get the factory method type, updating the receiver if {@code factoryMethodReceiverClass} is non-null */ public Class[] factoryMethodParameters(Class factoryMethodReceiverClass) { - List> parameters = new ArrayList<>(factoryMethodType.parameterList()); + Class[] parameters = factoryMethodType.parameterList().toArray(Class[]::new); if (factoryMethodReceiverClass != null) { - parameters.add(0, factoryMethodReceiverClass); + Class[] withReceiver = new Class[parameters.length + 1]; + withReceiver[0] = factoryMethodReceiverClass; + System.arraycopy(parameters, 0, withReceiver, 1, parameters.length); + parameters = withReceiver; } - return parameters.toArray(new Class[0]); + return parameters; } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/LambdaBootstrap.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/LambdaBootstrap.java index 54bfa3c86c71..a2fa5dea229d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/LambdaBootstrap.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/LambdaBootstrap.java @@ -24,7 +24,6 @@ import java.security.AccessController; import java.security.PrivilegedAction; import java.util.List; -import java.util.stream.Collectors; import static java.lang.invoke.MethodHandles.Lookup; import static org.elasticsearch.painless.WriterConstants.CLASS_VERSION; @@ -400,9 +399,9 @@ private static void generateInterfaceMethod( iface.visitCode(); // Loads any captured variables onto the stack. - for (int captureCount = 0; captureCount < captures.length; ++captureCount) { + for (Capture capture : captures) { iface.loadThis(); - iface.getField(lambdaClassType, captures[captureCount].name, captures[captureCount].type); + iface.getField(lambdaClassType, capture.name, capture.type); } // Loads any passed in arguments onto the stack. 
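The DefBootstrap hunks above simplify the inline-cache guards: checkRHS now receives only the cached right-hand class plus the runtime argument, and the call-site shape is recovered with MethodHandles.dropArguments (and MethodHandles.insertArguments for checkBoth) rather than binding parameters the guard never reads. Below is a minimal, self-contained sketch of that guard-adaptation pattern; it is not the Painless implementation, and the class and method names (GuardSketch, checkRhs, addInts, slowPath) are illustrative assumptions.

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

public class GuardSketch {
    // Illustrative guard mirroring the simplified checkRHS(Class, Object) shape:
    // true while the runtime argument still has the cached class.
    static boolean checkRhs(Class<?> right, Object rightObject) {
        return rightObject.getClass() == right;
    }

    // Hypothetical fast path cached for (Integer, Integer) arguments.
    static Object addInts(Object a, Object b) {
        return (Integer) a + (Integer) b;
    }

    // Hypothetical fallback taken when the guard fails.
    static Object slowPath(Object a, Object b) {
        return String.valueOf(a) + b;
    }

    public static void main(String[] args) throws Throwable {
        MethodHandles.Lookup lookup = MethodHandles.lookup();
        MethodHandle check = lookup.findStatic(GuardSketch.class, "checkRhs",
            MethodType.methodType(boolean.class, Class.class, Object.class));

        // Bind the cached class, then pad an ignored leading Object parameter so the
        // guard lines up with the (Object, Object) call site without touching the receiver.
        MethodHandle test = MethodHandles.dropArguments(check.bindTo(Integer.class), 0, Object.class);

        MethodHandle target = lookup.findStatic(GuardSketch.class, "addInts",
            MethodType.methodType(Object.class, Object.class, Object.class));
        MethodHandle fallback = lookup.findStatic(GuardSketch.class, "slowPath",
            MethodType.methodType(Object.class, Object.class, Object.class));

        MethodHandle site = MethodHandles.guardWithTest(test, target, fallback);
        System.out.println(site.invoke(1, 2));   // guard holds: prints 3
        System.out.println(site.invoke(1, "x")); // guard fails: prints 1x
    }
}

Binding only what the guard actually inspects keeps the adapted handle's type aligned with the call site while avoiding dead arguments, which is the effect the patch achieves with dropArguments/insertArguments.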
@@ -442,13 +441,16 @@ private static void generateInterfaceMethod( delegateClassType = Type.getType(clazz); // functionalInterfaceWithCaptures needs to add the receiver and other captures - List parameters = interfaceMethodType.parameterList().stream().map(Type::getType).collect(Collectors.toList()); - parameters.add(0, delegateClassType); + Type[] parameters = new Type[captures.length + interfaceMethodType.parameterList().size()]; + int p = 0; + parameters[p++] = delegateClassType; for (int i = 1; i < captures.length; i++) { - parameters.add(i, captures[i].type); + parameters[p++] = captures[i].type; + } + for (Class pCls : interfaceMethodType.parameterList()) { + parameters[p++] = Type.getType(pCls); } - Type[] parametersArray = parameters.toArray(new Type[0]); - functionalInterfaceWithCaptures = Type.getMethodDescriptor(Type.getType(interfaceMethodType.returnType()), parametersArray); + functionalInterfaceWithCaptures = Type.getMethodDescriptor(Type.getType(interfaceMethodType.returnType()), parameters); // delegateMethod does not need the receiver List> factoryParameters = factoryMethodType.parameterList(); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java index 878ff0e78b61..1e88edc788c6 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java @@ -61,21 +61,10 @@ import static org.elasticsearch.painless.WriterConstants.DEF_TO_STRING_EXPLICIT; import static org.elasticsearch.painless.WriterConstants.DEF_TO_STRING_IMPLICIT; import static org.elasticsearch.painless.WriterConstants.DEF_UTIL_TYPE; -import static org.elasticsearch.painless.WriterConstants.INDY_STRING_CONCAT_BOOTSTRAP_HANDLE; import static org.elasticsearch.painless.WriterConstants.LAMBDA_BOOTSTRAP_HANDLE; -import static org.elasticsearch.painless.WriterConstants.MAX_INDY_STRING_CONCAT_ARGS; +import static org.elasticsearch.painless.WriterConstants.MAX_STRING_CONCAT_ARGS; import static org.elasticsearch.painless.WriterConstants.PAINLESS_ERROR_TYPE; -import static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_APPEND_BOOLEAN; -import static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_APPEND_CHAR; -import static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_APPEND_DOUBLE; -import static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_APPEND_FLOAT; -import static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_APPEND_INT; -import static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_APPEND_LONG; -import static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_APPEND_OBJECT; -import static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_APPEND_STRING; -import static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_CONSTRUCTOR; -import static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_TOSTRING; -import static org.elasticsearch.painless.WriterConstants.STRINGBUILDER_TYPE; +import static org.elasticsearch.painless.WriterConstants.STRING_CONCAT_BOOTSTRAP_HANDLE; import static org.elasticsearch.painless.WriterConstants.STRING_TO_CHAR; import static org.elasticsearch.painless.WriterConstants.STRING_TYPE; import static org.elasticsearch.painless.WriterConstants.UTILITY_TYPE; @@ -90,7 +79,7 @@ public final class MethodWriter extends GeneratorAdapter { private final BitSet statements; private 
final CompilerSettings settings; - private final Deque> stringConcatArgs = (INDY_STRING_CONCAT_BOOTSTRAP_HANDLE == null) ? null : new ArrayDeque<>(); + private final Deque> stringConcatArgs = new ArrayDeque<>(); public MethodWriter(int access, Method method, ClassVisitor cw, BitSet statements, CompilerSettings settings) { super( @@ -266,57 +255,28 @@ public static Type getType(Class clazz) { /** Starts a new string concat. * @return the size of arguments pushed to stack (the object that does string concats, e.g. a StringBuilder) */ - public int writeNewStrings() { - if (INDY_STRING_CONCAT_BOOTSTRAP_HANDLE != null) { - // Java 9+: we just push our argument collector onto deque - stringConcatArgs.push(new ArrayList<>()); - return 0; // nothing added to stack - } else { - // Java 8: create a StringBuilder in bytecode - newInstance(STRINGBUILDER_TYPE); - dup(); - invokeConstructor(STRINGBUILDER_TYPE, STRINGBUILDER_CONSTRUCTOR); - return 1; // StringBuilder on stack - } + public List writeNewStrings() { + List list = new ArrayList<>(); + stringConcatArgs.push(list); + return list; } public void writeAppendStrings(Class clazz) { - if (INDY_STRING_CONCAT_BOOTSTRAP_HANDLE != null) { - // Java 9+: record type information - stringConcatArgs.peek().add(getType(clazz)); - // prevent too many concat args. - // If there are too many, do the actual concat: - if (stringConcatArgs.peek().size() >= MAX_INDY_STRING_CONCAT_ARGS) { - writeToStrings(); - writeNewStrings(); - // add the return value type as new first param for next concat: - stringConcatArgs.peek().add(STRING_TYPE); - } - } else { - // Java 8: push a StringBuilder append - if (clazz == boolean.class) invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_BOOLEAN); - else if (clazz == char.class) invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_CHAR); - else if (clazz == byte.class || clazz == short.class || clazz == int.class) invokeVirtual( - STRINGBUILDER_TYPE, - STRINGBUILDER_APPEND_INT - ); - else if (clazz == long.class) invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_LONG); - else if (clazz == float.class) invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_FLOAT); - else if (clazz == double.class) invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_DOUBLE); - else if (clazz == String.class) invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_STRING); - else invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_OBJECT); + List currentConcat = stringConcatArgs.peek(); + currentConcat.add(getType(clazz)); + // prevent too many concat args. 
+ // If there are too many, do the actual concat: + if (currentConcat.size() >= MAX_STRING_CONCAT_ARGS) { + writeToStrings(); + currentConcat = writeNewStrings(); + // add the return value type as new first param for next concat: + currentConcat.add(STRING_TYPE); } } public void writeToStrings() { - if (INDY_STRING_CONCAT_BOOTSTRAP_HANDLE != null) { - // Java 9+: use type information and push invokeDynamic - final String desc = Type.getMethodDescriptor(STRING_TYPE, stringConcatArgs.pop().stream().toArray(Type[]::new)); - invokeDynamic("concat", desc, INDY_STRING_CONCAT_BOOTSTRAP_HANDLE); - } else { - // Java 8: call toString() on StringBuilder - invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_TOSTRING); - } + final String desc = Type.getMethodDescriptor(STRING_TYPE, stringConcatArgs.pop().toArray(Type[]::new)); + invokeDynamic("concat", desc, STRING_CONCAT_BOOTSTRAP_HANDLE); } /** Writes a dynamic binary instruction: returnType, lhs, and rhs can be different */ diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java index dfb547cbff0c..f3b7803bf2c5 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java @@ -14,7 +14,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.ClusterSettings; @@ -147,7 +147,7 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationDeciders allocationDeciders + AllocationService allocationService ) { // this is a hack to bind the painless script engine in guice (all components are added to guice), so that // the painless context api. 
this is a temporary measure until transport actions do no require guice diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java index b30d45733cec..9207e0a55673 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/WriterConstants.java @@ -8,6 +8,7 @@ package org.elasticsearch.painless; +import org.elasticsearch.painless.api.ValueIterator; import org.objectweb.asm.Handle; import org.objectweb.asm.Opcodes; import org.objectweb.asm.Type; @@ -17,10 +18,9 @@ import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; import java.lang.invoke.MethodType; -import java.util.ArrayList; +import java.lang.invoke.StringConcatFactory; import java.util.Collection; import java.util.Iterator; -import java.util.List; import java.util.Objects; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -49,8 +49,17 @@ public final class WriterConstants { public static final MethodType NEEDS_PARAMETER_METHOD_TYPE = MethodType.methodType(boolean.class); public static final Type ITERATOR_TYPE = Type.getType(Iterator.class); + public static final Type VALUE_ITERATOR_TYPE = Type.getType(ValueIterator.class); public static final Method ITERATOR_HASNEXT = getAsmMethod(boolean.class, "hasNext"); public static final Method ITERATOR_NEXT = getAsmMethod(Object.class, "next"); + public static final Method VALUE_ITERATOR_NEXT_BOOLEAN = getAsmMethod(boolean.class, "nextBoolean"); + public static final Method VALUE_ITERATOR_NEXT_BYTE = getAsmMethod(byte.class, "nextByte"); + public static final Method VALUE_ITERATOR_NEXT_SHORT = getAsmMethod(short.class, "nextShort"); + public static final Method VALUE_ITERATOR_NEXT_CHAR = getAsmMethod(char.class, "nextChar"); + public static final Method VALUE_ITERATOR_NEXT_INT = getAsmMethod(int.class, "nextInt"); + public static final Method VALUE_ITERATOR_NEXT_LONG = getAsmMethod(long.class, "nextLong"); + public static final Method VALUE_ITERATOR_NEXT_FLOAT = getAsmMethod(float.class, "nextFloat"); + public static final Method VALUE_ITERATOR_NEXT_DOUBLE = getAsmMethod(double.class, "nextDouble"); public static final Type UTILITY_TYPE = Type.getType(Utility.class); public static final Method STRING_TO_CHAR = getAsmMethod(char.class, "StringTochar", String.class); @@ -163,39 +172,23 @@ public final class WriterConstants { false ); - /** dynamic invokedynamic bootstrap for indy string concats (Java 9+) */ - public static final Handle INDY_STRING_CONCAT_BOOTSTRAP_HANDLE; - static { - Handle bs; - try { - final Class factory = Class.forName("java.lang.invoke.StringConcatFactory"); - final String methodName = "makeConcat"; - final MethodType type = MethodType.methodType(CallSite.class, MethodHandles.Lookup.class, String.class, MethodType.class); - // ensure it is there: - MethodHandles.publicLookup().findStatic(factory, methodName, type); - bs = new Handle(Opcodes.H_INVOKESTATIC, Type.getInternalName(factory), methodName, type.toMethodDescriptorString(), false); - } catch (ReflectiveOperationException e) { - // not Java 9 - we set it null, so MethodWriter uses StringBuilder: - bs = null; - } - INDY_STRING_CONCAT_BOOTSTRAP_HANDLE = bs; - } + public static final MethodType MAKE_CONCAT_TYPE = MethodType.methodType( + CallSite.class, + MethodHandles.Lookup.class, + String.class, + MethodType.class + ); + public static final Handle 
STRING_CONCAT_BOOTSTRAP_HANDLE = new Handle( + Opcodes.H_INVOKESTATIC, + Type.getInternalName(StringConcatFactory.class), + "makeConcat", + MAKE_CONCAT_TYPE.toMethodDescriptorString(), + false + ); - public static final int MAX_INDY_STRING_CONCAT_ARGS = 200; + public static final int MAX_STRING_CONCAT_ARGS = 200; public static final Type STRING_TYPE = Type.getType(String.class); - public static final Type STRINGBUILDER_TYPE = Type.getType(StringBuilder.class); - - public static final Method STRINGBUILDER_CONSTRUCTOR = getAsmMethod(void.class, CTOR_METHOD_NAME); - public static final Method STRINGBUILDER_APPEND_BOOLEAN = getAsmMethod(StringBuilder.class, "append", boolean.class); - public static final Method STRINGBUILDER_APPEND_CHAR = getAsmMethod(StringBuilder.class, "append", char.class); - public static final Method STRINGBUILDER_APPEND_INT = getAsmMethod(StringBuilder.class, "append", int.class); - public static final Method STRINGBUILDER_APPEND_LONG = getAsmMethod(StringBuilder.class, "append", long.class); - public static final Method STRINGBUILDER_APPEND_FLOAT = getAsmMethod(StringBuilder.class, "append", float.class); - public static final Method STRINGBUILDER_APPEND_DOUBLE = getAsmMethod(StringBuilder.class, "append", double.class); - public static final Method STRINGBUILDER_APPEND_STRING = getAsmMethod(StringBuilder.class, "append", String.class); - public static final Method STRINGBUILDER_APPEND_OBJECT = getAsmMethod(StringBuilder.class, "append", Object.class); - public static final Method STRINGBUILDER_TOSTRING = getAsmMethod(String.class, "toString"); public static final Type OBJECTS_TYPE = Type.getType(Objects.class); public static final Method EQUALS = getAsmMethod(boolean.class, "equals", Object.class, Object.class); @@ -203,12 +196,6 @@ public final class WriterConstants { public static final Type COLLECTION_TYPE = Type.getType(Collection.class); public static final Method COLLECTION_SIZE = getAsmMethod(int.class, "size"); - public static final Type LIST_TYPE = Type.getType(List.class); - public static final Method LIST_ADD = getAsmMethod(boolean.class, "add", Object.class); - - public static final Type ARRAY_LIST_TYPE = Type.getType(ArrayList.class); - public static final Method ARRAY_LIST_CTOR_WITH_SIZE = getAsmMethod(void.class, CTOR_METHOD_NAME, int.class); - private static Method getAsmMethod(final Class rtype, final String name, final Class... 
ptypes) { return new Method(name, MethodType.methodType(rtype, ptypes).toMethodDescriptorString()); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java index 6a3d54e73497..1c50b3558dcd 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java @@ -49,6 +49,7 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.OnScriptError; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.query.AbstractQueryBuilder; @@ -557,7 +558,8 @@ static Response innerShardOperation(Request request, ScriptService scriptService BooleanFieldScript.LeafFactory leafFactory = factory.newFactory( BooleanFieldScript.CONTEXT.name, request.getScript().getParams(), - context.lookup() + context.lookup(), + OnScriptError.FAIL ); BooleanFieldScript booleanFieldScript = leafFactory.newInstance(leafReaderContext); List booleans = new ArrayList<>(); @@ -571,7 +573,8 @@ static Response innerShardOperation(Request request, ScriptService scriptService DateFieldScript.CONTEXT.name, request.getScript().getParams(), context.lookup(), - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER + DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + OnScriptError.FAIL ); DateFieldScript dateFieldScript = leafFactory.newInstance(leafReaderContext); List dates = new ArrayList<>(); @@ -584,7 +587,8 @@ static Response innerShardOperation(Request request, ScriptService scriptService DoubleFieldScript.LeafFactory leafFactory = factory.newFactory( DoubleFieldScript.CONTEXT.name, request.getScript().getParams(), - context.lookup() + context.lookup(), + OnScriptError.FAIL ); DoubleFieldScript doubleFieldScript = leafFactory.newInstance(leafReaderContext); List doubles = new ArrayList<>(); @@ -597,7 +601,8 @@ static Response innerShardOperation(Request request, ScriptService scriptService GeoPointFieldScript.LeafFactory leafFactory = factory.newFactory( GeoPointFieldScript.CONTEXT.name, request.getScript().getParams(), - context.lookup() + context.lookup(), + OnScriptError.FAIL ); GeoPointFieldScript geoPointFieldScript = leafFactory.newInstance(leafReaderContext); List points = new ArrayList<>(); @@ -615,7 +620,8 @@ static Response innerShardOperation(Request request, ScriptService scriptService IpFieldScript.LeafFactory leafFactory = factory.newFactory( IpFieldScript.CONTEXT.name, request.getScript().getParams(), - context.lookup() + context.lookup(), + OnScriptError.FAIL ); IpFieldScript ipFieldScript = leafFactory.newInstance(leafReaderContext); List ips = new ArrayList<>(); @@ -634,7 +640,8 @@ static Response innerShardOperation(Request request, ScriptService scriptService LongFieldScript.LeafFactory leafFactory = factory.newFactory( LongFieldScript.CONTEXT.name, request.getScript().getParams(), - context.lookup() + context.lookup(), + OnScriptError.FAIL ); LongFieldScript longFieldScript = leafFactory.newInstance(leafReaderContext); List longs = new ArrayList<>(); @@ -647,7 +654,8 @@ static Response innerShardOperation(Request request, ScriptService scriptService StringFieldScript.LeafFactory leafFactory = factory.newFactory( 
StringFieldScript.CONTEXT.name, request.getScript().getParams(), - context.lookup() + context.lookup(), + OnScriptError.FAIL ); StringFieldScript stringFieldScript = leafFactory.newInstance(leafReaderContext); List keywords = new ArrayList<>(); @@ -660,10 +668,12 @@ static Response innerShardOperation(Request request, ScriptService scriptService CompositeFieldScript.LeafFactory leafFactory = factory.newFactory( CompositeFieldScript.CONTEXT.name, request.getScript().getParams(), - context.lookup() + context.lookup(), + OnScriptError.FAIL ); CompositeFieldScript compositeFieldScript = leafFactory.newInstance(leafReaderContext); - return new Response(compositeFieldScript.runForDoc(0)); + compositeFieldScript.runForDoc(0); + return new Response(compositeFieldScript.getFieldValues()); }, indexService); } else { throw new UnsupportedOperationException("unsupported context [" + scriptContext.name + "]"); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java index 4e810af6b239..983327eac2d0 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java @@ -1,17 +1,22 @@ // ANTLR GENERATED CODE: DO NOT EDIT package org.elasticsearch.painless.antlr; -import org.antlr.v4.runtime.*; import org.antlr.v4.runtime.CharStream; import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.atn.*; +import org.antlr.v4.runtime.RuleContext; +import org.antlr.v4.runtime.RuntimeMetaData; +import org.antlr.v4.runtime.Vocabulary; +import org.antlr.v4.runtime.VocabularyImpl; +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.ATNDeserializer; +import org.antlr.v4.runtime.atn.LexerATNSimulator; +import org.antlr.v4.runtime.atn.PredictionContextCache; import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.misc.*; -@SuppressWarnings({ "all", "warnings", "unchecked", "unused", "cast" }) +@SuppressWarnings({ "all", "warnings", "unchecked", "unused", "cast", "CheckReturnValue" }) abstract class PainlessLexer extends Lexer { static { - RuntimeMetaData.checkVersion("4.5.3", RuntimeMetaData.VERSION); + RuntimeMetaData.checkVersion("4.11.1", RuntimeMetaData.VERSION); } protected static final DFA[] _decisionToDFA; @@ -25,269 +30,284 @@ abstract class PainlessLexer extends Lexer { 68, AOR = 69, ALSH = 70, ARSH = 71, AUSH = 72, OCTAL = 73, HEX = 74, INTEGER = 75, DECIMAL = 76, STRING = 77, REGEX = 78, TRUE = 79, FALSE = 80, NULL = 81, PRIMITIVE = 82, DEF = 83, ID = 84, DOTINTEGER = 85, DOTID = 86; public static final int AFTER_DOT = 1; + public static String[] channelNames = { "DEFAULT_TOKEN_CHANNEL", "HIDDEN" }; + public static String[] modeNames = { "DEFAULT_MODE", "AFTER_DOT" }; - public static final String[] ruleNames = { - "WS", - "COMMENT", - "LBRACK", - "RBRACK", - "LBRACE", - "RBRACE", - "LP", - "RP", - "DOLLAR", - "DOT", - "NSDOT", - "COMMA", - "SEMICOLON", - "IF", - "IN", - "ELSE", - "WHILE", - "DO", - "FOR", - "CONTINUE", - "BREAK", - "RETURN", - "NEW", - "TRY", - "CATCH", - "THROW", - "THIS", - "INSTANCEOF", - "BOOLNOT", - "BWNOT", - "MUL", - "DIV", - "REM", - "ADD", - "SUB", - "LSH", - "RSH", - "USH", - "LT", - "LTE", - "GT", - "GTE", - "EQ", - "EQR", - "NE", - "NER", - "BWAND", - "XOR", - "BWOR", - "BOOLAND", - "BOOLOR", - "COND", - "COLON", - "ELVIS", - "REF", - "ARROW", - "FIND", - "MATCH", - "INCR", - "DECR", - 
"ASSIGN", - "AADD", - "ASUB", - "AMUL", - "ADIV", - "AREM", - "AAND", - "AXOR", - "AOR", - "ALSH", - "ARSH", - "AUSH", - "OCTAL", - "HEX", - "INTEGER", - "DECIMAL", - "STRING", - "REGEX", - "TRUE", - "FALSE", - "NULL", - "PRIMITIVE", - "DEF", - "ID", - "DOTINTEGER", - "DOTID" }; + private static String[] makeRuleNames() { + return new String[] { + "WS", + "COMMENT", + "LBRACK", + "RBRACK", + "LBRACE", + "RBRACE", + "LP", + "RP", + "DOLLAR", + "DOT", + "NSDOT", + "COMMA", + "SEMICOLON", + "IF", + "IN", + "ELSE", + "WHILE", + "DO", + "FOR", + "CONTINUE", + "BREAK", + "RETURN", + "NEW", + "TRY", + "CATCH", + "THROW", + "THIS", + "INSTANCEOF", + "BOOLNOT", + "BWNOT", + "MUL", + "DIV", + "REM", + "ADD", + "SUB", + "LSH", + "RSH", + "USH", + "LT", + "LTE", + "GT", + "GTE", + "EQ", + "EQR", + "NE", + "NER", + "BWAND", + "XOR", + "BWOR", + "BOOLAND", + "BOOLOR", + "COND", + "COLON", + "ELVIS", + "REF", + "ARROW", + "FIND", + "MATCH", + "INCR", + "DECR", + "ASSIGN", + "AADD", + "ASUB", + "AMUL", + "ADIV", + "AREM", + "AAND", + "AXOR", + "AOR", + "ALSH", + "ARSH", + "AUSH", + "OCTAL", + "HEX", + "INTEGER", + "DECIMAL", + "STRING", + "REGEX", + "TRUE", + "FALSE", + "NULL", + "PRIMITIVE", + "DEF", + "ID", + "DOTINTEGER", + "DOTID" }; + } + + public static final String[] ruleNames = makeRuleNames(); + + private static String[] makeLiteralNames() { + return new String[] { + null, + null, + null, + "'{'", + "'}'", + "'['", + "']'", + "'('", + "')'", + "'$'", + "'.'", + "'?.'", + "','", + "';'", + "'if'", + "'in'", + "'else'", + "'while'", + "'do'", + "'for'", + "'continue'", + "'break'", + "'return'", + "'new'", + "'try'", + "'catch'", + "'throw'", + "'this'", + "'instanceof'", + "'!'", + "'~'", + "'*'", + "'/'", + "'%'", + "'+'", + "'-'", + "'<<'", + "'>>'", + "'>>>'", + "'<'", + "'<='", + "'>'", + "'>='", + "'=='", + "'==='", + "'!='", + "'!=='", + "'&'", + "'^'", + "'|'", + "'&&'", + "'||'", + "'?'", + "':'", + "'?:'", + "'::'", + "'->'", + "'=~'", + "'==~'", + "'++'", + "'--'", + "'='", + "'+='", + "'-='", + "'*='", + "'/='", + "'%='", + "'&='", + "'^='", + "'|='", + "'<<='", + "'>>='", + "'>>>='", + null, + null, + null, + null, + null, + null, + "'true'", + "'false'", + "'null'", + null, + "'def'" }; + } + + private static final String[] _LITERAL_NAMES = makeLiteralNames(); - private static final String[] _LITERAL_NAMES = { - null, - null, - null, - "'{'", - "'}'", - "'['", - "']'", - "'('", - "')'", - "'$'", - "'.'", - "'?.'", - "','", - "';'", - "'if'", - "'in'", - "'else'", - "'while'", - "'do'", - "'for'", - "'continue'", - "'break'", - "'return'", - "'new'", - "'try'", - "'catch'", - "'throw'", - "'this'", - "'instanceof'", - "'!'", - "'~'", - "'*'", - "'/'", - "'%'", - "'+'", - "'-'", - "'<<'", - "'>>'", - "'>>>'", - "'<'", - "'<='", - "'>'", - "'>='", - "'=='", - "'==='", - "'!='", - "'!=='", - "'&'", - "'^'", - "'|'", - "'&&'", - "'||'", - "'?'", - "':'", - "'?:'", - "'::'", - "'->'", - "'=~'", - "'==~'", - "'++'", - "'--'", - "'='", - "'+='", - "'-='", - "'*='", - "'/='", - "'%='", - "'&='", - "'^='", - "'|='", - "'<<='", - "'>>='", - "'>>>='", - null, - null, - null, - null, - null, - null, - "'true'", - "'false'", - "'null'", - null, - "'def'" }; - private static final String[] _SYMBOLIC_NAMES = { - null, - "WS", - "COMMENT", - "LBRACK", - "RBRACK", - "LBRACE", - "RBRACE", - "LP", - "RP", - "DOLLAR", - "DOT", - "NSDOT", - "COMMA", - "SEMICOLON", - "IF", - "IN", - "ELSE", - "WHILE", - "DO", - "FOR", - "CONTINUE", - "BREAK", - "RETURN", - "NEW", - "TRY", - "CATCH", - "THROW", - "THIS", - 
"INSTANCEOF", - "BOOLNOT", - "BWNOT", - "MUL", - "DIV", - "REM", - "ADD", - "SUB", - "LSH", - "RSH", - "USH", - "LT", - "LTE", - "GT", - "GTE", - "EQ", - "EQR", - "NE", - "NER", - "BWAND", - "XOR", - "BWOR", - "BOOLAND", - "BOOLOR", - "COND", - "COLON", - "ELVIS", - "REF", - "ARROW", - "FIND", - "MATCH", - "INCR", - "DECR", - "ASSIGN", - "AADD", - "ASUB", - "AMUL", - "ADIV", - "AREM", - "AAND", - "AXOR", - "AOR", - "ALSH", - "ARSH", - "AUSH", - "OCTAL", - "HEX", - "INTEGER", - "DECIMAL", - "STRING", - "REGEX", - "TRUE", - "FALSE", - "NULL", - "PRIMITIVE", - "DEF", - "ID", - "DOTINTEGER", - "DOTID" }; + private static String[] makeSymbolicNames() { + return new String[] { + null, + "WS", + "COMMENT", + "LBRACK", + "RBRACK", + "LBRACE", + "RBRACE", + "LP", + "RP", + "DOLLAR", + "DOT", + "NSDOT", + "COMMA", + "SEMICOLON", + "IF", + "IN", + "ELSE", + "WHILE", + "DO", + "FOR", + "CONTINUE", + "BREAK", + "RETURN", + "NEW", + "TRY", + "CATCH", + "THROW", + "THIS", + "INSTANCEOF", + "BOOLNOT", + "BWNOT", + "MUL", + "DIV", + "REM", + "ADD", + "SUB", + "LSH", + "RSH", + "USH", + "LT", + "LTE", + "GT", + "GTE", + "EQ", + "EQR", + "NE", + "NER", + "BWAND", + "XOR", + "BWOR", + "BOOLAND", + "BOOLOR", + "COND", + "COLON", + "ELVIS", + "REF", + "ARROW", + "FIND", + "MATCH", + "INCR", + "DECR", + "ASSIGN", + "AADD", + "ASUB", + "AMUL", + "ADIV", + "AREM", + "AAND", + "AXOR", + "AOR", + "ALSH", + "ARSH", + "AUSH", + "OCTAL", + "HEX", + "INTEGER", + "DECIMAL", + "STRING", + "REGEX", + "TRUE", + "FALSE", + "NULL", + "PRIMITIVE", + "DEF", + "ID", + "DOTINTEGER", + "DOTID" }; + } + + private static final String[] _SYMBOLIC_NAMES = makeSymbolicNames(); public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); /** @@ -344,6 +364,11 @@ public String getSerializedATN() { return _serializedATN; } + @Override + public String[] getChannelNames() { + return channelNames; + } + @Override public String[] getModeNames() { return modeNames; @@ -381,229 +406,408 @@ private boolean REGEX_sempred(RuleContext _localctx, int predIndex) { return true; } - public static final String _serializedATN = "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2X\u027e\b\1\b\1\4" - + "\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n" - + "\4\13\t\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22" - + "\t\22\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31" - + "\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t" - + " \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t" - + "+\4,\t,\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64" - + "\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:\4;\t;\4<\t<\4=\t" - + "=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\tC\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4" - + "I\tI\4J\tJ\4K\tK\4L\tL\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\t" - + "T\4U\tU\4V\tV\4W\tW\3\2\6\2\u00b2\n\2\r\2\16\2\u00b3\3\2\3\2\3\3\3\3\3" - + "\3\3\3\7\3\u00bc\n\3\f\3\16\3\u00bf\13\3\3\3\3\3\3\3\3\3\3\3\7\3\u00c6" - + "\n\3\f\3\16\3\u00c9\13\3\3\3\3\3\5\3\u00cd\n\3\3\3\3\3\3\4\3\4\3\5\3\5" - + "\3\6\3\6\3\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\13\3\13\3\13\3\13\3\f\3\f\3" - + "\f\3\f\3\f\3\r\3\r\3\16\3\16\3\17\3\17\3\17\3\20\3\20\3\20\3\21\3\21\3" - + "\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22\3\22\3\23\3\23\3\23\3\24\3\24\3" - + "\24\3\24\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3" - + "\26\3\26\3\26\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3" - + 
"\31\3\31\3\31\3\31\3\32\3\32\3\32\3\32\3\32\3\32\3\33\3\33\3\33\3\33\3" - + "\33\3\33\3\34\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3" - + "\35\3\35\3\35\3\35\3\36\3\36\3\37\3\37\3 \3 \3!\3!\3!\3\"\3\"\3#\3#\3" - + "$\3$\3%\3%\3%\3&\3&\3&\3\'\3\'\3\'\3\'\3(\3(\3)\3)\3)\3*\3*\3+\3+\3+\3" - + ",\3,\3,\3-\3-\3-\3-\3.\3.\3.\3/\3/\3/\3/\3\60\3\60\3\61\3\61\3\62\3\62" - + "\3\63\3\63\3\63\3\64\3\64\3\64\3\65\3\65\3\66\3\66\3\67\3\67\3\67\38\3" - + "8\38\39\39\39\3:\3:\3:\3;\3;\3;\3;\3<\3<\3<\3=\3=\3=\3>\3>\3?\3?\3?\3" - + "@\3@\3@\3A\3A\3A\3B\3B\3B\3C\3C\3C\3D\3D\3D\3E\3E\3E\3F\3F\3F\3G\3G\3" - + "G\3G\3H\3H\3H\3H\3I\3I\3I\3I\3I\3J\3J\6J\u01be\nJ\rJ\16J\u01bf\3J\5J\u01c3" - + "\nJ\3K\3K\3K\6K\u01c8\nK\rK\16K\u01c9\3K\5K\u01cd\nK\3L\3L\3L\7L\u01d2" - + "\nL\fL\16L\u01d5\13L\5L\u01d7\nL\3L\5L\u01da\nL\3M\3M\3M\7M\u01df\nM\f" - + "M\16M\u01e2\13M\5M\u01e4\nM\3M\3M\6M\u01e8\nM\rM\16M\u01e9\5M\u01ec\n" - + "M\3M\3M\5M\u01f0\nM\3M\6M\u01f3\nM\rM\16M\u01f4\5M\u01f7\nM\3M\5M\u01fa" - + "\nM\3N\3N\3N\3N\3N\3N\7N\u0202\nN\fN\16N\u0205\13N\3N\3N\3N\3N\3N\3N\3" - + "N\7N\u020e\nN\fN\16N\u0211\13N\3N\5N\u0214\nN\3O\3O\3O\3O\6O\u021a\nO" - + "\rO\16O\u021b\3O\3O\7O\u0220\nO\fO\16O\u0223\13O\3O\3O\3P\3P\3P\3P\3P" - + "\3Q\3Q\3Q\3Q\3Q\3Q\3R\3R\3R\3R\3R\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S" - + "\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S\3S" - + "\3S\3S\3S\5S\u025d\nS\3T\3T\3T\3T\3U\3U\7U\u0265\nU\fU\16U\u0268\13U\3" - + "V\3V\3V\7V\u026d\nV\fV\16V\u0270\13V\5V\u0272\nV\3V\3V\3W\3W\7W\u0278" - + "\nW\fW\16W\u027b\13W\3W\3W\7\u00bd\u00c7\u0203\u020f\u021b\2X\4\3\6\4" - + "\b\5\n\6\f\7\16\b\20\t\22\n\24\13\26\f\30\r\32\16\34\17\36\20 \21\"\22" - + "$\23&\24(\25*\26,\27.\30\60\31\62\32\64\33\66\348\35:\36<\37> @!B\"D#" - + "F$H%J&L\'N(P)R*T+V,X-Z.\\/^\60`\61b\62d\63f\64h\65j\66l\67n8p9r:t;v|?~@\u0080A\u0082B\u0084C\u0086D\u0088E\u008aF\u008cG\u008eH\u0090" - + "I\u0092J\u0094K\u0096L\u0098M\u009aN\u009cO\u009eP\u00a0Q\u00a2R\u00a4" - + "S\u00a6T\u00a8U\u00aaV\u00acW\u00aeX\4\2\3\25\5\2\13\f\17\17\"\"\4\2\f" - + "\f\17\17\3\2\629\4\2NNnn\4\2ZZzz\5\2\62;CHch\3\2\63;\3\2\62;\b\2FFHHN" - + "Nffhhnn\4\2GGgg\4\2--//\6\2FFHHffhh\4\2$$^^\4\2))^^\3\2\f\f\4\2\f\f\61" - + "\61\t\2WWeekknouuwwzz\5\2C\\aac|\6\2\62;C\\aac|\u02a4\2\4\3\2\2\2\2\6" - + "\3\2\2\2\2\b\3\2\2\2\2\n\3\2\2\2\2\f\3\2\2\2\2\16\3\2\2\2\2\20\3\2\2\2" - + "\2\22\3\2\2\2\2\24\3\2\2\2\2\26\3\2\2\2\2\30\3\2\2\2\2\32\3\2\2\2\2\34" - + "\3\2\2\2\2\36\3\2\2\2\2 \3\2\2\2\2\"\3\2\2\2\2$\3\2\2\2\2&\3\2\2\2\2(" - + "\3\2\2\2\2*\3\2\2\2\2,\3\2\2\2\2.\3\2\2\2\2\60\3\2\2\2\2\62\3\2\2\2\2" - + "\64\3\2\2\2\2\66\3\2\2\2\28\3\2\2\2\2:\3\2\2\2\2<\3\2\2\2\2>\3\2\2\2\2" - + "@\3\2\2\2\2B\3\2\2\2\2D\3\2\2\2\2F\3\2\2\2\2H\3\2\2\2\2J\3\2\2\2\2L\3" - + "\2\2\2\2N\3\2\2\2\2P\3\2\2\2\2R\3\2\2\2\2T\3\2\2\2\2V\3\2\2\2\2X\3\2\2" - + "\2\2Z\3\2\2\2\2\\\3\2\2\2\2^\3\2\2\2\2`\3\2\2\2\2b\3\2\2\2\2d\3\2\2\2" - + "\2f\3\2\2\2\2h\3\2\2\2\2j\3\2\2\2\2l\3\2\2\2\2n\3\2\2\2\2p\3\2\2\2\2r" - + "\3\2\2\2\2t\3\2\2\2\2v\3\2\2\2\2x\3\2\2\2\2z\3\2\2\2\2|\3\2\2\2\2~\3\2" - + "\2\2\2\u0080\3\2\2\2\2\u0082\3\2\2\2\2\u0084\3\2\2\2\2\u0086\3\2\2\2\2" - + "\u0088\3\2\2\2\2\u008a\3\2\2\2\2\u008c\3\2\2\2\2\u008e\3\2\2\2\2\u0090" - + "\3\2\2\2\2\u0092\3\2\2\2\2\u0094\3\2\2\2\2\u0096\3\2\2\2\2\u0098\3\2\2" - + "\2\2\u009a\3\2\2\2\2\u009c\3\2\2\2\2\u009e\3\2\2\2\2\u00a0\3\2\2\2\2\u00a2" - + "\3\2\2\2\2\u00a4\3\2\2\2\2\u00a6\3\2\2\2\2\u00a8\3\2\2\2\2\u00aa\3\2\2" - + "\2\3\u00ac\3\2\2\2\3\u00ae\3\2\2\2\4\u00b1\3\2\2\2\6\u00cc\3\2\2\2\b\u00d0" - + 
"\3\2\2\2\n\u00d2\3\2\2\2\f\u00d4\3\2\2\2\16\u00d6\3\2\2\2\20\u00d8\3\2" - + "\2\2\22\u00da\3\2\2\2\24\u00dc\3\2\2\2\26\u00de\3\2\2\2\30\u00e2\3\2\2" - + "\2\32\u00e7\3\2\2\2\34\u00e9\3\2\2\2\36\u00eb\3\2\2\2 \u00ee\3\2\2\2\"" - + "\u00f1\3\2\2\2$\u00f6\3\2\2\2&\u00fc\3\2\2\2(\u00ff\3\2\2\2*\u0103\3\2" - + "\2\2,\u010c\3\2\2\2.\u0112\3\2\2\2\60\u0119\3\2\2\2\62\u011d\3\2\2\2\64" - + "\u0121\3\2\2\2\66\u0127\3\2\2\28\u012d\3\2\2\2:\u0132\3\2\2\2<\u013d\3" - + "\2\2\2>\u013f\3\2\2\2@\u0141\3\2\2\2B\u0143\3\2\2\2D\u0146\3\2\2\2F\u0148" - + "\3\2\2\2H\u014a\3\2\2\2J\u014c\3\2\2\2L\u014f\3\2\2\2N\u0152\3\2\2\2P" - + "\u0156\3\2\2\2R\u0158\3\2\2\2T\u015b\3\2\2\2V\u015d\3\2\2\2X\u0160\3\2" - + "\2\2Z\u0163\3\2\2\2\\\u0167\3\2\2\2^\u016a\3\2\2\2`\u016e\3\2\2\2b\u0170" - + "\3\2\2\2d\u0172\3\2\2\2f\u0174\3\2\2\2h\u0177\3\2\2\2j\u017a\3\2\2\2l" - + "\u017c\3\2\2\2n\u017e\3\2\2\2p\u0181\3\2\2\2r\u0184\3\2\2\2t\u0187\3\2" - + "\2\2v\u018a\3\2\2\2x\u018e\3\2\2\2z\u0191\3\2\2\2|\u0194\3\2\2\2~\u0196" - + "\3\2\2\2\u0080\u0199\3\2\2\2\u0082\u019c\3\2\2\2\u0084\u019f\3\2\2\2\u0086" - + "\u01a2\3\2\2\2\u0088\u01a5\3\2\2\2\u008a\u01a8\3\2\2\2\u008c\u01ab\3\2" - + "\2\2\u008e\u01ae\3\2\2\2\u0090\u01b2\3\2\2\2\u0092\u01b6\3\2\2\2\u0094" - + "\u01bb\3\2\2\2\u0096\u01c4\3\2\2\2\u0098\u01d6\3\2\2\2\u009a\u01e3\3\2" - + "\2\2\u009c\u0213\3\2\2\2\u009e\u0215\3\2\2\2\u00a0\u0226\3\2\2\2\u00a2" - + "\u022b\3\2\2\2\u00a4\u0231\3\2\2\2\u00a6\u025c\3\2\2\2\u00a8\u025e\3\2" - + "\2\2\u00aa\u0262\3\2\2\2\u00ac\u0271\3\2\2\2\u00ae\u0275\3\2\2\2\u00b0" - + "\u00b2\t\2\2\2\u00b1\u00b0\3\2\2\2\u00b2\u00b3\3\2\2\2\u00b3\u00b1\3\2" - + "\2\2\u00b3\u00b4\3\2\2\2\u00b4\u00b5\3\2\2\2\u00b5\u00b6\b\2\2\2\u00b6" - + "\5\3\2\2\2\u00b7\u00b8\7\61\2\2\u00b8\u00b9\7\61\2\2\u00b9\u00bd\3\2\2" - + "\2\u00ba\u00bc\13\2\2\2\u00bb\u00ba\3\2\2\2\u00bc\u00bf\3\2\2\2\u00bd" - + "\u00be\3\2\2\2\u00bd\u00bb\3\2\2\2\u00be\u00c0\3\2\2\2\u00bf\u00bd\3\2" - + "\2\2\u00c0\u00cd\t\3\2\2\u00c1\u00c2\7\61\2\2\u00c2\u00c3\7,\2\2\u00c3" - + "\u00c7\3\2\2\2\u00c4\u00c6\13\2\2\2\u00c5\u00c4\3\2\2\2\u00c6\u00c9\3" - + "\2\2\2\u00c7\u00c8\3\2\2\2\u00c7\u00c5\3\2\2\2\u00c8\u00ca\3\2\2\2\u00c9" - + "\u00c7\3\2\2\2\u00ca\u00cb\7,\2\2\u00cb\u00cd\7\61\2\2\u00cc\u00b7\3\2" - + "\2\2\u00cc\u00c1\3\2\2\2\u00cd\u00ce\3\2\2\2\u00ce\u00cf\b\3\2\2\u00cf" - + "\7\3\2\2\2\u00d0\u00d1\7}\2\2\u00d1\t\3\2\2\2\u00d2\u00d3\7\177\2\2\u00d3" - + "\13\3\2\2\2\u00d4\u00d5\7]\2\2\u00d5\r\3\2\2\2\u00d6\u00d7\7_\2\2\u00d7" - + "\17\3\2\2\2\u00d8\u00d9\7*\2\2\u00d9\21\3\2\2\2\u00da\u00db\7+\2\2\u00db" - + "\23\3\2\2\2\u00dc\u00dd\7&\2\2\u00dd\25\3\2\2\2\u00de\u00df\7\60\2\2\u00df" - + "\u00e0\3\2\2\2\u00e0\u00e1\b\13\3\2\u00e1\27\3\2\2\2\u00e2\u00e3\7A\2" - + "\2\u00e3\u00e4\7\60\2\2\u00e4\u00e5\3\2\2\2\u00e5\u00e6\b\f\3\2\u00e6" - + "\31\3\2\2\2\u00e7\u00e8\7.\2\2\u00e8\33\3\2\2\2\u00e9\u00ea\7=\2\2\u00ea" - + "\35\3\2\2\2\u00eb\u00ec\7k\2\2\u00ec\u00ed\7h\2\2\u00ed\37\3\2\2\2\u00ee" - + "\u00ef\7k\2\2\u00ef\u00f0\7p\2\2\u00f0!\3\2\2\2\u00f1\u00f2\7g\2\2\u00f2" - + "\u00f3\7n\2\2\u00f3\u00f4\7u\2\2\u00f4\u00f5\7g\2\2\u00f5#\3\2\2\2\u00f6" - + "\u00f7\7y\2\2\u00f7\u00f8\7j\2\2\u00f8\u00f9\7k\2\2\u00f9\u00fa\7n\2\2" - + "\u00fa\u00fb\7g\2\2\u00fb%\3\2\2\2\u00fc\u00fd\7f\2\2\u00fd\u00fe\7q\2" - + "\2\u00fe\'\3\2\2\2\u00ff\u0100\7h\2\2\u0100\u0101\7q\2\2\u0101\u0102\7" - + "t\2\2\u0102)\3\2\2\2\u0103\u0104\7e\2\2\u0104\u0105\7q\2\2\u0105\u0106" - + "\7p\2\2\u0106\u0107\7v\2\2\u0107\u0108\7k\2\2\u0108\u0109\7p\2\2\u0109" - + 
"\u010a\7w\2\2\u010a\u010b\7g\2\2\u010b+\3\2\2\2\u010c\u010d\7d\2\2\u010d" - + "\u010e\7t\2\2\u010e\u010f\7g\2\2\u010f\u0110\7c\2\2\u0110\u0111\7m\2\2" - + "\u0111-\3\2\2\2\u0112\u0113\7t\2\2\u0113\u0114\7g\2\2\u0114\u0115\7v\2" - + "\2\u0115\u0116\7w\2\2\u0116\u0117\7t\2\2\u0117\u0118\7p\2\2\u0118/\3\2" - + "\2\2\u0119\u011a\7p\2\2\u011a\u011b\7g\2\2\u011b\u011c\7y\2\2\u011c\61" - + "\3\2\2\2\u011d\u011e\7v\2\2\u011e\u011f\7t\2\2\u011f\u0120\7{\2\2\u0120" - + "\63\3\2\2\2\u0121\u0122\7e\2\2\u0122\u0123\7c\2\2\u0123\u0124\7v\2\2\u0124" - + "\u0125\7e\2\2\u0125\u0126\7j\2\2\u0126\65\3\2\2\2\u0127\u0128\7v\2\2\u0128" - + "\u0129\7j\2\2\u0129\u012a\7t\2\2\u012a\u012b\7q\2\2\u012b\u012c\7y\2\2" - + "\u012c\67\3\2\2\2\u012d\u012e\7v\2\2\u012e\u012f\7j\2\2\u012f\u0130\7" - + "k\2\2\u0130\u0131\7u\2\2\u01319\3\2\2\2\u0132\u0133\7k\2\2\u0133\u0134" - + "\7p\2\2\u0134\u0135\7u\2\2\u0135\u0136\7v\2\2\u0136\u0137\7c\2\2\u0137" - + "\u0138\7p\2\2\u0138\u0139\7e\2\2\u0139\u013a\7g\2\2\u013a\u013b\7q\2\2" - + "\u013b\u013c\7h\2\2\u013c;\3\2\2\2\u013d\u013e\7#\2\2\u013e=\3\2\2\2\u013f" - + "\u0140\7\u0080\2\2\u0140?\3\2\2\2\u0141\u0142\7,\2\2\u0142A\3\2\2\2\u0143" - + "\u0144\7\61\2\2\u0144\u0145\6!\2\2\u0145C\3\2\2\2\u0146\u0147\7\'\2\2" - + "\u0147E\3\2\2\2\u0148\u0149\7-\2\2\u0149G\3\2\2\2\u014a\u014b\7/\2\2\u014b" - + "I\3\2\2\2\u014c\u014d\7>\2\2\u014d\u014e\7>\2\2\u014eK\3\2\2\2\u014f\u0150" - + "\7@\2\2\u0150\u0151\7@\2\2\u0151M\3\2\2\2\u0152\u0153\7@\2\2\u0153\u0154" - + "\7@\2\2\u0154\u0155\7@\2\2\u0155O\3\2\2\2\u0156\u0157\7>\2\2\u0157Q\3" - + "\2\2\2\u0158\u0159\7>\2\2\u0159\u015a\7?\2\2\u015aS\3\2\2\2\u015b\u015c" - + "\7@\2\2\u015cU\3\2\2\2\u015d\u015e\7@\2\2\u015e\u015f\7?\2\2\u015fW\3" - + "\2\2\2\u0160\u0161\7?\2\2\u0161\u0162\7?\2\2\u0162Y\3\2\2\2\u0163\u0164" - + "\7?\2\2\u0164\u0165\7?\2\2\u0165\u0166\7?\2\2\u0166[\3\2\2\2\u0167\u0168" - + "\7#\2\2\u0168\u0169\7?\2\2\u0169]\3\2\2\2\u016a\u016b\7#\2\2\u016b\u016c" - + "\7?\2\2\u016c\u016d\7?\2\2\u016d_\3\2\2\2\u016e\u016f\7(\2\2\u016fa\3" - + "\2\2\2\u0170\u0171\7`\2\2\u0171c\3\2\2\2\u0172\u0173\7~\2\2\u0173e\3\2" - + "\2\2\u0174\u0175\7(\2\2\u0175\u0176\7(\2\2\u0176g\3\2\2\2\u0177\u0178" - + "\7~\2\2\u0178\u0179\7~\2\2\u0179i\3\2\2\2\u017a\u017b\7A\2\2\u017bk\3" - + "\2\2\2\u017c\u017d\7<\2\2\u017dm\3\2\2\2\u017e\u017f\7A\2\2\u017f\u0180" - + "\7<\2\2\u0180o\3\2\2\2\u0181\u0182\7<\2\2\u0182\u0183\7<\2\2\u0183q\3" - + "\2\2\2\u0184\u0185\7/\2\2\u0185\u0186\7@\2\2\u0186s\3\2\2\2\u0187\u0188" - + "\7?\2\2\u0188\u0189\7\u0080\2\2\u0189u\3\2\2\2\u018a\u018b\7?\2\2\u018b" - + "\u018c\7?\2\2\u018c\u018d\7\u0080\2\2\u018dw\3\2\2\2\u018e\u018f\7-\2" - + "\2\u018f\u0190\7-\2\2\u0190y\3\2\2\2\u0191\u0192\7/\2\2\u0192\u0193\7" - + "/\2\2\u0193{\3\2\2\2\u0194\u0195\7?\2\2\u0195}\3\2\2\2\u0196\u0197\7-" - + "\2\2\u0197\u0198\7?\2\2\u0198\177\3\2\2\2\u0199\u019a\7/\2\2\u019a\u019b" - + "\7?\2\2\u019b\u0081\3\2\2\2\u019c\u019d\7,\2\2\u019d\u019e\7?\2\2\u019e" - + "\u0083\3\2\2\2\u019f\u01a0\7\61\2\2\u01a0\u01a1\7?\2\2\u01a1\u0085\3\2" - + "\2\2\u01a2\u01a3\7\'\2\2\u01a3\u01a4\7?\2\2\u01a4\u0087\3\2\2\2\u01a5" - + "\u01a6\7(\2\2\u01a6\u01a7\7?\2\2\u01a7\u0089\3\2\2\2\u01a8\u01a9\7`\2" - + "\2\u01a9\u01aa\7?\2\2\u01aa\u008b\3\2\2\2\u01ab\u01ac\7~\2\2\u01ac\u01ad" - + "\7?\2\2\u01ad\u008d\3\2\2\2\u01ae\u01af\7>\2\2\u01af\u01b0\7>\2\2\u01b0" - + "\u01b1\7?\2\2\u01b1\u008f\3\2\2\2\u01b2\u01b3\7@\2\2\u01b3\u01b4\7@\2" - + "\2\u01b4\u01b5\7?\2\2\u01b5\u0091\3\2\2\2\u01b6\u01b7\7@\2\2\u01b7\u01b8" - + 
"\7@\2\2\u01b8\u01b9\7@\2\2\u01b9\u01ba\7?\2\2\u01ba\u0093\3\2\2\2\u01bb" - + "\u01bd\7\62\2\2\u01bc\u01be\t\4\2\2\u01bd\u01bc\3\2\2\2\u01be\u01bf\3" - + "\2\2\2\u01bf\u01bd\3\2\2\2\u01bf\u01c0\3\2\2\2\u01c0\u01c2\3\2\2\2\u01c1" - + "\u01c3\t\5\2\2\u01c2\u01c1\3\2\2\2\u01c2\u01c3\3\2\2\2\u01c3\u0095\3\2" - + "\2\2\u01c4\u01c5\7\62\2\2\u01c5\u01c7\t\6\2\2\u01c6\u01c8\t\7\2\2\u01c7" - + "\u01c6\3\2\2\2\u01c8\u01c9\3\2\2\2\u01c9\u01c7\3\2\2\2\u01c9\u01ca\3\2" - + "\2\2\u01ca\u01cc\3\2\2\2\u01cb\u01cd\t\5\2\2\u01cc\u01cb\3\2\2\2\u01cc" - + "\u01cd\3\2\2\2\u01cd\u0097\3\2\2\2\u01ce\u01d7\7\62\2\2\u01cf\u01d3\t" - + "\b\2\2\u01d0\u01d2\t\t\2\2\u01d1\u01d0\3\2\2\2\u01d2\u01d5\3\2\2\2\u01d3" - + "\u01d1\3\2\2\2\u01d3\u01d4\3\2\2\2\u01d4\u01d7\3\2\2\2\u01d5\u01d3\3\2" - + "\2\2\u01d6\u01ce\3\2\2\2\u01d6\u01cf\3\2\2\2\u01d7\u01d9\3\2\2\2\u01d8" - + "\u01da\t\n\2\2\u01d9\u01d8\3\2\2\2\u01d9\u01da\3\2\2\2\u01da\u0099\3\2" - + "\2\2\u01db\u01e4\7\62\2\2\u01dc\u01e0\t\b\2\2\u01dd\u01df\t\t\2\2\u01de" - + "\u01dd\3\2\2\2\u01df\u01e2\3\2\2\2\u01e0\u01de\3\2\2\2\u01e0\u01e1\3\2" - + "\2\2\u01e1\u01e4\3\2\2\2\u01e2\u01e0\3\2\2\2\u01e3\u01db\3\2\2\2\u01e3" - + "\u01dc\3\2\2\2\u01e4\u01eb\3\2\2\2\u01e5\u01e7\5\26\13\2\u01e6\u01e8\t" - + "\t\2\2\u01e7\u01e6\3\2\2\2\u01e8\u01e9\3\2\2\2\u01e9\u01e7\3\2\2\2\u01e9" - + "\u01ea\3\2\2\2\u01ea\u01ec\3\2\2\2\u01eb\u01e5\3\2\2\2\u01eb\u01ec\3\2" - + "\2\2\u01ec\u01f6\3\2\2\2\u01ed\u01ef\t\13\2\2\u01ee\u01f0\t\f\2\2\u01ef" - + "\u01ee\3\2\2\2\u01ef\u01f0\3\2\2\2\u01f0\u01f2\3\2\2\2\u01f1\u01f3\t\t" - + "\2\2\u01f2\u01f1\3\2\2\2\u01f3\u01f4\3\2\2\2\u01f4\u01f2\3\2\2\2\u01f4" - + "\u01f5\3\2\2\2\u01f5\u01f7\3\2\2\2\u01f6\u01ed\3\2\2\2\u01f6\u01f7\3\2" - + "\2\2\u01f7\u01f9\3\2\2\2\u01f8\u01fa\t\r\2\2\u01f9\u01f8\3\2\2\2\u01f9" - + "\u01fa\3\2\2\2\u01fa\u009b\3\2\2\2\u01fb\u0203\7$\2\2\u01fc\u01fd\7^\2" - + "\2\u01fd\u0202\7$\2\2\u01fe\u01ff\7^\2\2\u01ff\u0202\7^\2\2\u0200\u0202" - + "\n\16\2\2\u0201\u01fc\3\2\2\2\u0201\u01fe\3\2\2\2\u0201\u0200\3\2\2\2" - + "\u0202\u0205\3\2\2\2\u0203\u0204\3\2\2\2\u0203\u0201\3\2\2\2\u0204\u0206" - + "\3\2\2\2\u0205\u0203\3\2\2\2\u0206\u0214\7$\2\2\u0207\u020f\7)\2\2\u0208" - + "\u0209\7^\2\2\u0209\u020e\7)\2\2\u020a\u020b\7^\2\2\u020b\u020e\7^\2\2" - + "\u020c\u020e\n\17\2\2\u020d\u0208\3\2\2\2\u020d\u020a\3\2\2\2\u020d\u020c" - + "\3\2\2\2\u020e\u0211\3\2\2\2\u020f\u0210\3\2\2\2\u020f\u020d\3\2\2\2\u0210" - + "\u0212\3\2\2\2\u0211\u020f\3\2\2\2\u0212\u0214\7)\2\2\u0213\u01fb\3\2" - + "\2\2\u0213\u0207\3\2\2\2\u0214\u009d\3\2\2\2\u0215\u0219\7\61\2\2\u0216" - + "\u0217\7^\2\2\u0217\u021a\n\20\2\2\u0218\u021a\n\21\2\2\u0219\u0216\3" - + "\2\2\2\u0219\u0218\3\2\2\2\u021a\u021b\3\2\2\2\u021b\u021c\3\2\2\2\u021b" - + "\u0219\3\2\2\2\u021c\u021d\3\2\2\2\u021d\u0221\7\61\2\2\u021e\u0220\t" - + "\22\2\2\u021f\u021e\3\2\2\2\u0220\u0223\3\2\2\2\u0221\u021f\3\2\2\2\u0221" - + "\u0222\3\2\2\2\u0222\u0224\3\2\2\2\u0223\u0221\3\2\2\2\u0224\u0225\6O" - + "\3\2\u0225\u009f\3\2\2\2\u0226\u0227\7v\2\2\u0227\u0228\7t\2\2\u0228\u0229" - + "\7w\2\2\u0229\u022a\7g\2\2\u022a\u00a1\3\2\2\2\u022b\u022c\7h\2\2\u022c" - + "\u022d\7c\2\2\u022d\u022e\7n\2\2\u022e\u022f\7u\2\2\u022f\u0230\7g\2\2" - + "\u0230\u00a3\3\2\2\2\u0231\u0232\7p\2\2\u0232\u0233\7w\2\2\u0233\u0234" - + "\7n\2\2\u0234\u0235\7n\2\2\u0235\u00a5\3\2\2\2\u0236\u0237\7d\2\2\u0237" - + "\u0238\7q\2\2\u0238\u0239\7q\2\2\u0239\u023a\7n\2\2\u023a\u023b\7g\2\2" - + "\u023b\u023c\7c\2\2\u023c\u025d\7p\2\2\u023d\u023e\7d\2\2\u023e\u023f" - + 
"\7{\2\2\u023f\u0240\7v\2\2\u0240\u025d\7g\2\2\u0241\u0242\7u\2\2\u0242" - + "\u0243\7j\2\2\u0243\u0244\7q\2\2\u0244\u0245\7t\2\2\u0245\u025d\7v\2\2" - + "\u0246\u0247\7e\2\2\u0247\u0248\7j\2\2\u0248\u0249\7c\2\2\u0249\u025d" - + "\7t\2\2\u024a\u024b\7k\2\2\u024b\u024c\7p\2\2\u024c\u025d\7v\2\2\u024d" - + "\u024e\7n\2\2\u024e\u024f\7q\2\2\u024f\u0250\7p\2\2\u0250\u025d\7i\2\2" - + "\u0251\u0252\7h\2\2\u0252\u0253\7n\2\2\u0253\u0254\7q\2\2\u0254\u0255" - + "\7c\2\2\u0255\u025d\7v\2\2\u0256\u0257\7f\2\2\u0257\u0258\7q\2\2\u0258" - + "\u0259\7w\2\2\u0259\u025a\7d\2\2\u025a\u025b\7n\2\2\u025b\u025d\7g\2\2" - + "\u025c\u0236\3\2\2\2\u025c\u023d\3\2\2\2\u025c\u0241\3\2\2\2\u025c\u0246" - + "\3\2\2\2\u025c\u024a\3\2\2\2\u025c\u024d\3\2\2\2\u025c\u0251\3\2\2\2\u025c" - + "\u0256\3\2\2\2\u025d\u00a7\3\2\2\2\u025e\u025f\7f\2\2\u025f\u0260\7g\2" - + "\2\u0260\u0261\7h\2\2\u0261\u00a9\3\2\2\2\u0262\u0266\t\23\2\2\u0263\u0265" - + "\t\24\2\2\u0264\u0263\3\2\2\2\u0265\u0268\3\2\2\2\u0266\u0264\3\2\2\2" - + "\u0266\u0267\3\2\2\2\u0267\u00ab\3\2\2\2\u0268\u0266\3\2\2\2\u0269\u0272" - + "\7\62\2\2\u026a\u026e\t\b\2\2\u026b\u026d\t\t\2\2\u026c\u026b\3\2\2\2" - + "\u026d\u0270\3\2\2\2\u026e\u026c\3\2\2\2\u026e\u026f\3\2\2\2\u026f\u0272" - + "\3\2\2\2\u0270\u026e\3\2\2\2\u0271\u0269\3\2\2\2\u0271\u026a\3\2\2\2\u0272" - + "\u0273\3\2\2\2\u0273\u0274\bV\4\2\u0274\u00ad\3\2\2\2\u0275\u0279\t\23" - + "\2\2\u0276\u0278\t\24\2\2\u0277\u0276\3\2\2\2\u0278\u027b\3\2\2\2\u0279" - + "\u0277\3\2\2\2\u0279\u027a\3\2\2\2\u027a\u027c\3\2\2\2\u027b\u0279\3\2" - + "\2\2\u027c\u027d\bW\4\2\u027d\u00af\3\2\2\2$\2\3\u00b3\u00bd\u00c7\u00cc" - + "\u01bf\u01c2\u01c9\u01cc\u01d3\u01d6\u01d9\u01e0\u01e3\u01e9\u01eb\u01ef" - + "\u01f4\u01f6\u01f9\u0201\u0203\u020d\u020f\u0213\u0219\u021b\u0221\u025c" - + "\u0266\u026e\u0271\u0279\5\b\2\2\4\3\2\4\2\2"; + public static final String _serializedATN = "\u0004\u0000V\u027c\u0006\uffff\uffff\u0006\uffff\uffff\u0002\u0000\u0007" + + "\u0000\u0002\u0001\u0007\u0001\u0002\u0002\u0007\u0002\u0002\u0003\u0007" + + "\u0003\u0002\u0004\u0007\u0004\u0002\u0005\u0007\u0005\u0002\u0006\u0007" + + "\u0006\u0002\u0007\u0007\u0007\u0002\b\u0007\b\u0002\t\u0007\t\u0002\n" + + "\u0007\n\u0002\u000b\u0007\u000b\u0002\f\u0007\f\u0002\r\u0007\r\u0002" + + "\u000e\u0007\u000e\u0002\u000f\u0007\u000f\u0002\u0010\u0007\u0010\u0002" + + "\u0011\u0007\u0011\u0002\u0012\u0007\u0012\u0002\u0013\u0007\u0013\u0002" + + "\u0014\u0007\u0014\u0002\u0015\u0007\u0015\u0002\u0016\u0007\u0016\u0002" + + "\u0017\u0007\u0017\u0002\u0018\u0007\u0018\u0002\u0019\u0007\u0019\u0002" + + "\u001a\u0007\u001a\u0002\u001b\u0007\u001b\u0002\u001c\u0007\u001c\u0002" + + "\u001d\u0007\u001d\u0002\u001e\u0007\u001e\u0002\u001f\u0007\u001f\u0002" + + " \u0007 \u0002!\u0007!\u0002\"\u0007\"\u0002#\u0007#\u0002$\u0007$\u0002" + + "%\u0007%\u0002&\u0007&\u0002\'\u0007\'\u0002(\u0007(\u0002)\u0007)\u0002" + + "*\u0007*\u0002+\u0007+\u0002,\u0007,\u0002-\u0007-\u0002.\u0007.\u0002" + + "/\u0007/\u00020\u00070\u00021\u00071\u00022\u00072\u00023\u00073\u0002" + + "4\u00074\u00025\u00075\u00026\u00076\u00027\u00077\u00028\u00078\u0002" + + "9\u00079\u0002:\u0007:\u0002;\u0007;\u0002<\u0007<\u0002=\u0007=\u0002" + + ">\u0007>\u0002?\u0007?\u0002@\u0007@\u0002A\u0007A\u0002B\u0007B\u0002" + + "C\u0007C\u0002D\u0007D\u0002E\u0007E\u0002F\u0007F\u0002G\u0007G\u0002" + + "H\u0007H\u0002I\u0007I\u0002J\u0007J\u0002K\u0007K\u0002L\u0007L\u0002" + + "M\u0007M\u0002N\u0007N\u0002O\u0007O\u0002P\u0007P\u0002Q\u0007Q\u0002" + + 
"R\u0007R\u0002S\u0007S\u0002T\u0007T\u0002U\u0007U\u0001\u0000\u0004\u0000" + + "\u00b0\b\u0000\u000b\u0000\f\u0000\u00b1\u0001\u0000\u0001\u0000\u0001" + + "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0005\u0001\u00ba\b\u0001\n" + + "\u0001\f\u0001\u00bd\t\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001" + + "\u0001\u0001\u0001\u0005\u0001\u00c4\b\u0001\n\u0001\f\u0001\u00c7\t\u0001" + + "\u0001\u0001\u0001\u0001\u0003\u0001\u00cb\b\u0001\u0001\u0001\u0001\u0001" + + "\u0001\u0002\u0001\u0002\u0001\u0003\u0001\u0003\u0001\u0004\u0001\u0004" + + "\u0001\u0005\u0001\u0005\u0001\u0006\u0001\u0006\u0001\u0007\u0001\u0007" + + "\u0001\b\u0001\b\u0001\t\u0001\t\u0001\t\u0001\t\u0001\n\u0001\n\u0001" + + "\n\u0001\n\u0001\n\u0001\u000b\u0001\u000b\u0001\f\u0001\f\u0001\r\u0001" + + "\r\u0001\r\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000f\u0001\u000f" + + "\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u0010\u0001\u0010\u0001\u0010" + + "\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0011\u0001\u0011\u0001\u0011" + + "\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0013\u0001\u0013" + + "\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0013" + + "\u0001\u0013\u0001\u0014\u0001\u0014\u0001\u0014\u0001\u0014\u0001\u0014" + + "\u0001\u0014\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015" + + "\u0001\u0015\u0001\u0015\u0001\u0016\u0001\u0016\u0001\u0016\u0001\u0016" + + "\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0018\u0001\u0018" + + "\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0019\u0001\u0019" + + "\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u001a\u0001\u001a" + + "\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001b\u0001\u001b\u0001\u001b" + + "\u0001\u001b\u0001\u001b\u0001\u001b\u0001\u001b\u0001\u001b\u0001\u001b" + + "\u0001\u001b\u0001\u001b\u0001\u001c\u0001\u001c\u0001\u001d\u0001\u001d" + + "\u0001\u001e\u0001\u001e\u0001\u001f\u0001\u001f\u0001\u001f\u0001 \u0001" + + " \u0001!\u0001!\u0001\"\u0001\"\u0001#\u0001#\u0001#\u0001$\u0001$\u0001" + + "$\u0001%\u0001%\u0001%\u0001%\u0001&\u0001&\u0001\'\u0001\'\u0001\'\u0001" + + "(\u0001(\u0001)\u0001)\u0001)\u0001*\u0001*\u0001*\u0001+\u0001+\u0001" + + "+\u0001+\u0001,\u0001,\u0001,\u0001-\u0001-\u0001-\u0001-\u0001.\u0001" + + ".\u0001/\u0001/\u00010\u00010\u00011\u00011\u00011\u00012\u00012\u0001" + + "2\u00013\u00013\u00014\u00014\u00015\u00015\u00015\u00016\u00016\u0001" + + "6\u00017\u00017\u00017\u00018\u00018\u00018\u00019\u00019\u00019\u0001" + + "9\u0001:\u0001:\u0001:\u0001;\u0001;\u0001;\u0001<\u0001<\u0001=\u0001" + + "=\u0001=\u0001>\u0001>\u0001>\u0001?\u0001?\u0001?\u0001@\u0001@\u0001" + + "@\u0001A\u0001A\u0001A\u0001B\u0001B\u0001B\u0001C\u0001C\u0001C\u0001" + + "D\u0001D\u0001D\u0001E\u0001E\u0001E\u0001E\u0001F\u0001F\u0001F\u0001" + + "F\u0001G\u0001G\u0001G\u0001G\u0001G\u0001H\u0001H\u0004H\u01bc\bH\u000b" + + "H\fH\u01bd\u0001H\u0003H\u01c1\bH\u0001I\u0001I\u0001I\u0004I\u01c6\b" + + "I\u000bI\fI\u01c7\u0001I\u0003I\u01cb\bI\u0001J\u0001J\u0001J\u0005J\u01d0" + + "\bJ\nJ\fJ\u01d3\tJ\u0003J\u01d5\bJ\u0001J\u0003J\u01d8\bJ\u0001K\u0001" + + "K\u0001K\u0005K\u01dd\bK\nK\fK\u01e0\tK\u0003K\u01e2\bK\u0001K\u0001K" + + "\u0004K\u01e6\bK\u000bK\fK\u01e7\u0003K\u01ea\bK\u0001K\u0001K\u0003K" + + "\u01ee\bK\u0001K\u0004K\u01f1\bK\u000bK\fK\u01f2\u0003K\u01f5\bK\u0001" + + "K\u0003K\u01f8\bK\u0001L\u0001L\u0001L\u0001L\u0001L\u0001L\u0005L\u0200" + + "\bL\nL\fL\u0203\tL\u0001L\u0001L\u0001L\u0001L\u0001L\u0001L\u0001L\u0005" + + 
"L\u020c\bL\nL\fL\u020f\tL\u0001L\u0003L\u0212\bL\u0001M\u0001M\u0001M" + + "\u0001M\u0004M\u0218\bM\u000bM\fM\u0219\u0001M\u0001M\u0005M\u021e\bM" + + "\nM\fM\u0221\tM\u0001M\u0001M\u0001N\u0001N\u0001N\u0001N\u0001N\u0001" + + "O\u0001O\u0001O\u0001O\u0001O\u0001O\u0001P\u0001P\u0001P\u0001P\u0001" + + "P\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001" + + "Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001" + + "Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001" + + "Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0001Q\u0003Q\u025b" + + "\bQ\u0001R\u0001R\u0001R\u0001R\u0001S\u0001S\u0005S\u0263\bS\nS\fS\u0266" + + "\tS\u0001T\u0001T\u0001T\u0005T\u026b\bT\nT\fT\u026e\tT\u0003T\u0270\b" + + "T\u0001T\u0001T\u0001U\u0001U\u0005U\u0276\bU\nU\fU\u0279\tU\u0001U\u0001" + + "U\u0005\u00bb\u00c5\u0201\u020d\u0219\u0000V\u0002\u0001\u0004\u0002\u0006" + + "\u0003\b\u0004\n\u0005\f\u0006\u000e\u0007\u0010\b\u0012\t\u0014\n\u0016" + + "\u000b\u0018\f\u001a\r\u001c\u000e\u001e\u000f \u0010\"\u0011$\u0012&" + + "\u0013(\u0014*\u0015,\u0016.\u00170\u00182\u00194\u001a6\u001b8\u001c" + + ":\u001d<\u001e>\u001f@ B!D\"F#H$J%L&N\'P(R)T*V+X,Z-\\.^/`0b1d2f3h4j5l" + + "6n7p8r9t:v;x~?\u0080@\u0082A\u0084B\u0086C\u0088D\u008aE\u008cF\u008e" + + "G\u0090H\u0092I\u0094J\u0096K\u0098L\u009aM\u009cN\u009eO\u00a0P\u00a2" + + "Q\u00a4R\u00a6S\u00a8T\u00aaU\u00acV\u0002\u0000\u0001\u0013\u0003\u0000" + + "\t\n\r\r \u0002\u0000\n\n\r\r\u0001\u000007\u0002\u0000LLll\u0002\u0000" + + "XXxx\u0003\u000009AFaf\u0001\u000019\u0001\u000009\u0006\u0000DDFFLLd" + + "dffll\u0002\u0000EEee\u0002\u0000++--\u0004\u0000DDFFddff\u0002\u0000" + + "\"\"\\\\\u0002\u0000\'\'\\\\\u0001\u0000\n\n\u0002\u0000\n\n//\u0007\u0000" + + "UUcciilmssuuxx\u0003\u0000AZ__az\u0004\u000009AZ__az\u02a2\u0000\u0002" + + "\u0001\u0000\u0000\u0000\u0000\u0004\u0001\u0000\u0000\u0000\u0000\u0006" + + "\u0001\u0000\u0000\u0000\u0000\b\u0001\u0000\u0000\u0000\u0000\n\u0001" + + "\u0000\u0000\u0000\u0000\f\u0001\u0000\u0000\u0000\u0000\u000e\u0001\u0000" + + "\u0000\u0000\u0000\u0010\u0001\u0000\u0000\u0000\u0000\u0012\u0001\u0000" + + "\u0000\u0000\u0000\u0014\u0001\u0000\u0000\u0000\u0000\u0016\u0001\u0000" + + "\u0000\u0000\u0000\u0018\u0001\u0000\u0000\u0000\u0000\u001a\u0001\u0000" + + "\u0000\u0000\u0000\u001c\u0001\u0000\u0000\u0000\u0000\u001e\u0001\u0000" + + "\u0000\u0000\u0000 \u0001\u0000\u0000\u0000\u0000\"\u0001\u0000\u0000" + + "\u0000\u0000$\u0001\u0000\u0000\u0000\u0000&\u0001\u0000\u0000\u0000\u0000" + + "(\u0001\u0000\u0000\u0000\u0000*\u0001\u0000\u0000\u0000\u0000,\u0001" + + "\u0000\u0000\u0000\u0000.\u0001\u0000\u0000\u0000\u00000\u0001\u0000\u0000" + + "\u0000\u00002\u0001\u0000\u0000\u0000\u00004\u0001\u0000\u0000\u0000\u0000" + + "6\u0001\u0000\u0000\u0000\u00008\u0001\u0000\u0000\u0000\u0000:\u0001" + + "\u0000\u0000\u0000\u0000<\u0001\u0000\u0000\u0000\u0000>\u0001\u0000\u0000" + + "\u0000\u0000@\u0001\u0000\u0000\u0000\u0000B\u0001\u0000\u0000\u0000\u0000" + + "D\u0001\u0000\u0000\u0000\u0000F\u0001\u0000\u0000\u0000\u0000H\u0001" + + "\u0000\u0000\u0000\u0000J\u0001\u0000\u0000\u0000\u0000L\u0001\u0000\u0000" + + "\u0000\u0000N\u0001\u0000\u0000\u0000\u0000P\u0001\u0000\u0000\u0000\u0000" + + "R\u0001\u0000\u0000\u0000\u0000T\u0001\u0000\u0000\u0000\u0000V\u0001" + + "\u0000\u0000\u0000\u0000X\u0001\u0000\u0000\u0000\u0000Z\u0001\u0000\u0000" + + "\u0000\u0000\\\u0001\u0000\u0000\u0000\u0000^\u0001\u0000\u0000\u0000" + + 
"\u0000`\u0001\u0000\u0000\u0000\u0000b\u0001\u0000\u0000\u0000\u0000d" + + "\u0001\u0000\u0000\u0000\u0000f\u0001\u0000\u0000\u0000\u0000h\u0001\u0000" + + "\u0000\u0000\u0000j\u0001\u0000\u0000\u0000\u0000l\u0001\u0000\u0000\u0000" + + "\u0000n\u0001\u0000\u0000\u0000\u0000p\u0001\u0000\u0000\u0000\u0000r" + + "\u0001\u0000\u0000\u0000\u0000t\u0001\u0000\u0000\u0000\u0000v\u0001\u0000" + + "\u0000\u0000\u0000x\u0001\u0000\u0000\u0000\u0000z\u0001\u0000\u0000\u0000" + + "\u0000|\u0001\u0000\u0000\u0000\u0000~\u0001\u0000\u0000\u0000\u0000\u0080" + + "\u0001\u0000\u0000\u0000\u0000\u0082\u0001\u0000\u0000\u0000\u0000\u0084" + + "\u0001\u0000\u0000\u0000\u0000\u0086\u0001\u0000\u0000\u0000\u0000\u0088" + + "\u0001\u0000\u0000\u0000\u0000\u008a\u0001\u0000\u0000\u0000\u0000\u008c" + + "\u0001\u0000\u0000\u0000\u0000\u008e\u0001\u0000\u0000\u0000\u0000\u0090" + + "\u0001\u0000\u0000\u0000\u0000\u0092\u0001\u0000\u0000\u0000\u0000\u0094" + + "\u0001\u0000\u0000\u0000\u0000\u0096\u0001\u0000\u0000\u0000\u0000\u0098" + + "\u0001\u0000\u0000\u0000\u0000\u009a\u0001\u0000\u0000\u0000\u0000\u009c" + + "\u0001\u0000\u0000\u0000\u0000\u009e\u0001\u0000\u0000\u0000\u0000\u00a0" + + "\u0001\u0000\u0000\u0000\u0000\u00a2\u0001\u0000\u0000\u0000\u0000\u00a4" + + "\u0001\u0000\u0000\u0000\u0000\u00a6\u0001\u0000\u0000\u0000\u0000\u00a8" + + "\u0001\u0000\u0000\u0000\u0001\u00aa\u0001\u0000\u0000\u0000\u0001\u00ac" + + "\u0001\u0000\u0000\u0000\u0002\u00af\u0001\u0000\u0000\u0000\u0004\u00ca" + + "\u0001\u0000\u0000\u0000\u0006\u00ce\u0001\u0000\u0000\u0000\b\u00d0\u0001" + + "\u0000\u0000\u0000\n\u00d2\u0001\u0000\u0000\u0000\f\u00d4\u0001\u0000" + + "\u0000\u0000\u000e\u00d6\u0001\u0000\u0000\u0000\u0010\u00d8\u0001\u0000" + + "\u0000\u0000\u0012\u00da\u0001\u0000\u0000\u0000\u0014\u00dc\u0001\u0000" + + "\u0000\u0000\u0016\u00e0\u0001\u0000\u0000\u0000\u0018\u00e5\u0001\u0000" + + "\u0000\u0000\u001a\u00e7\u0001\u0000\u0000\u0000\u001c\u00e9\u0001\u0000" + + "\u0000\u0000\u001e\u00ec\u0001\u0000\u0000\u0000 \u00ef\u0001\u0000\u0000" + + "\u0000\"\u00f4\u0001\u0000\u0000\u0000$\u00fa\u0001\u0000\u0000\u0000" + + "&\u00fd\u0001\u0000\u0000\u0000(\u0101\u0001\u0000\u0000\u0000*\u010a" + + "\u0001\u0000\u0000\u0000,\u0110\u0001\u0000\u0000\u0000.\u0117\u0001\u0000" + + "\u0000\u00000\u011b\u0001\u0000\u0000\u00002\u011f\u0001\u0000\u0000\u0000" + + "4\u0125\u0001\u0000\u0000\u00006\u012b\u0001\u0000\u0000\u00008\u0130" + + "\u0001\u0000\u0000\u0000:\u013b\u0001\u0000\u0000\u0000<\u013d\u0001\u0000" + + "\u0000\u0000>\u013f\u0001\u0000\u0000\u0000@\u0141\u0001\u0000\u0000\u0000" + + "B\u0144\u0001\u0000\u0000\u0000D\u0146\u0001\u0000\u0000\u0000F\u0148" + + "\u0001\u0000\u0000\u0000H\u014a\u0001\u0000\u0000\u0000J\u014d\u0001\u0000" + + "\u0000\u0000L\u0150\u0001\u0000\u0000\u0000N\u0154\u0001\u0000\u0000\u0000" + + "P\u0156\u0001\u0000\u0000\u0000R\u0159\u0001\u0000\u0000\u0000T\u015b" + + "\u0001\u0000\u0000\u0000V\u015e\u0001\u0000\u0000\u0000X\u0161\u0001\u0000" + + "\u0000\u0000Z\u0165\u0001\u0000\u0000\u0000\\\u0168\u0001\u0000\u0000" + + "\u0000^\u016c\u0001\u0000\u0000\u0000`\u016e\u0001\u0000\u0000\u0000b" + + "\u0170\u0001\u0000\u0000\u0000d\u0172\u0001\u0000\u0000\u0000f\u0175\u0001" + + "\u0000\u0000\u0000h\u0178\u0001\u0000\u0000\u0000j\u017a\u0001\u0000\u0000" + + "\u0000l\u017c\u0001\u0000\u0000\u0000n\u017f\u0001\u0000\u0000\u0000p" + + "\u0182\u0001\u0000\u0000\u0000r\u0185\u0001\u0000\u0000\u0000t\u0188\u0001" + + "\u0000\u0000\u0000v\u018c\u0001\u0000\u0000\u0000x\u018f\u0001\u0000\u0000" 
+ + "\u0000z\u0192\u0001\u0000\u0000\u0000|\u0194\u0001\u0000\u0000\u0000~" + + "\u0197\u0001\u0000\u0000\u0000\u0080\u019a\u0001\u0000\u0000\u0000\u0082" + + "\u019d\u0001\u0000\u0000\u0000\u0084\u01a0\u0001\u0000\u0000\u0000\u0086" + + "\u01a3\u0001\u0000\u0000\u0000\u0088\u01a6\u0001\u0000\u0000\u0000\u008a" + + "\u01a9\u0001\u0000\u0000\u0000\u008c\u01ac\u0001\u0000\u0000\u0000\u008e" + + "\u01b0\u0001\u0000\u0000\u0000\u0090\u01b4\u0001\u0000\u0000\u0000\u0092" + + "\u01b9\u0001\u0000\u0000\u0000\u0094\u01c2\u0001\u0000\u0000\u0000\u0096" + + "\u01d4\u0001\u0000\u0000\u0000\u0098\u01e1\u0001\u0000\u0000\u0000\u009a" + + "\u0211\u0001\u0000\u0000\u0000\u009c\u0213\u0001\u0000\u0000\u0000\u009e" + + "\u0224\u0001\u0000\u0000\u0000\u00a0\u0229\u0001\u0000\u0000\u0000\u00a2" + + "\u022f\u0001\u0000\u0000\u0000\u00a4\u025a\u0001\u0000\u0000\u0000\u00a6" + + "\u025c\u0001\u0000\u0000\u0000\u00a8\u0260\u0001\u0000\u0000\u0000\u00aa" + + "\u026f\u0001\u0000\u0000\u0000\u00ac\u0273\u0001\u0000\u0000\u0000\u00ae" + + "\u00b0\u0007\u0000\u0000\u0000\u00af\u00ae\u0001\u0000\u0000\u0000\u00b0" + + "\u00b1\u0001\u0000\u0000\u0000\u00b1\u00af\u0001\u0000\u0000\u0000\u00b1" + + "\u00b2\u0001\u0000\u0000\u0000\u00b2\u00b3\u0001\u0000\u0000\u0000\u00b3" + + "\u00b4\u0006\u0000\u0000\u0000\u00b4\u0003\u0001\u0000\u0000\u0000\u00b5" + + "\u00b6\u0005/\u0000\u0000\u00b6\u00b7\u0005/\u0000\u0000\u00b7\u00bb\u0001" + + "\u0000\u0000\u0000\u00b8\u00ba\t\u0000\u0000\u0000\u00b9\u00b8\u0001\u0000" + + "\u0000\u0000\u00ba\u00bd\u0001\u0000\u0000\u0000\u00bb\u00bc\u0001\u0000" + + "\u0000\u0000\u00bb\u00b9\u0001\u0000\u0000\u0000\u00bc\u00be\u0001\u0000" + + "\u0000\u0000\u00bd\u00bb\u0001\u0000\u0000\u0000\u00be\u00cb\u0007\u0001" + + "\u0000\u0000\u00bf\u00c0\u0005/\u0000\u0000\u00c0\u00c1\u0005*\u0000\u0000" + + "\u00c1\u00c5\u0001\u0000\u0000\u0000\u00c2\u00c4\t\u0000\u0000\u0000\u00c3" + + "\u00c2\u0001\u0000\u0000\u0000\u00c4\u00c7\u0001\u0000\u0000\u0000\u00c5" + + "\u00c6\u0001\u0000\u0000\u0000\u00c5\u00c3\u0001\u0000\u0000\u0000\u00c6" + + "\u00c8\u0001\u0000\u0000\u0000\u00c7\u00c5\u0001\u0000\u0000\u0000\u00c8" + + "\u00c9\u0005*\u0000\u0000\u00c9\u00cb\u0005/\u0000\u0000\u00ca\u00b5\u0001" + + "\u0000\u0000\u0000\u00ca\u00bf\u0001\u0000\u0000\u0000\u00cb\u00cc\u0001" + + "\u0000\u0000\u0000\u00cc\u00cd\u0006\u0001\u0000\u0000\u00cd\u0005\u0001" + + "\u0000\u0000\u0000\u00ce\u00cf\u0005{\u0000\u0000\u00cf\u0007\u0001\u0000" + + "\u0000\u0000\u00d0\u00d1\u0005}\u0000\u0000\u00d1\t\u0001\u0000\u0000" + + "\u0000\u00d2\u00d3\u0005[\u0000\u0000\u00d3\u000b\u0001\u0000\u0000\u0000" + + "\u00d4\u00d5\u0005]\u0000\u0000\u00d5\r\u0001\u0000\u0000\u0000\u00d6" + + "\u00d7\u0005(\u0000\u0000\u00d7\u000f\u0001\u0000\u0000\u0000\u00d8\u00d9" + + "\u0005)\u0000\u0000\u00d9\u0011\u0001\u0000\u0000\u0000\u00da\u00db\u0005" + + "$\u0000\u0000\u00db\u0013\u0001\u0000\u0000\u0000\u00dc\u00dd\u0005.\u0000" + + "\u0000\u00dd\u00de\u0001\u0000\u0000\u0000\u00de\u00df\u0006\t\u0001\u0000" + + "\u00df\u0015\u0001\u0000\u0000\u0000\u00e0\u00e1\u0005?\u0000\u0000\u00e1" + + "\u00e2\u0005.\u0000\u0000\u00e2\u00e3\u0001\u0000\u0000\u0000\u00e3\u00e4" + + "\u0006\n\u0001\u0000\u00e4\u0017\u0001\u0000\u0000\u0000\u00e5\u00e6\u0005" + + ",\u0000\u0000\u00e6\u0019\u0001\u0000\u0000\u0000\u00e7\u00e8\u0005;\u0000" + + "\u0000\u00e8\u001b\u0001\u0000\u0000\u0000\u00e9\u00ea\u0005i\u0000\u0000" + + "\u00ea\u00eb\u0005f\u0000\u0000\u00eb\u001d\u0001\u0000\u0000\u0000\u00ec" + + 
"\u00ed\u0005i\u0000\u0000\u00ed\u00ee\u0005n\u0000\u0000\u00ee\u001f\u0001" + + "\u0000\u0000\u0000\u00ef\u00f0\u0005e\u0000\u0000\u00f0\u00f1\u0005l\u0000" + + "\u0000\u00f1\u00f2\u0005s\u0000\u0000\u00f2\u00f3\u0005e\u0000\u0000\u00f3" + + "!\u0001\u0000\u0000\u0000\u00f4\u00f5\u0005w\u0000\u0000\u00f5\u00f6\u0005" + + "h\u0000\u0000\u00f6\u00f7\u0005i\u0000\u0000\u00f7\u00f8\u0005l\u0000" + + "\u0000\u00f8\u00f9\u0005e\u0000\u0000\u00f9#\u0001\u0000\u0000\u0000\u00fa" + + "\u00fb\u0005d\u0000\u0000\u00fb\u00fc\u0005o\u0000\u0000\u00fc%\u0001" + + "\u0000\u0000\u0000\u00fd\u00fe\u0005f\u0000\u0000\u00fe\u00ff\u0005o\u0000" + + "\u0000\u00ff\u0100\u0005r\u0000\u0000\u0100\'\u0001\u0000\u0000\u0000" + + "\u0101\u0102\u0005c\u0000\u0000\u0102\u0103\u0005o\u0000\u0000\u0103\u0104" + + "\u0005n\u0000\u0000\u0104\u0105\u0005t\u0000\u0000\u0105\u0106\u0005i" + + "\u0000\u0000\u0106\u0107\u0005n\u0000\u0000\u0107\u0108\u0005u\u0000\u0000" + + "\u0108\u0109\u0005e\u0000\u0000\u0109)\u0001\u0000\u0000\u0000\u010a\u010b" + + "\u0005b\u0000\u0000\u010b\u010c\u0005r\u0000\u0000\u010c\u010d\u0005e" + + "\u0000\u0000\u010d\u010e\u0005a\u0000\u0000\u010e\u010f\u0005k\u0000\u0000" + + "\u010f+\u0001\u0000\u0000\u0000\u0110\u0111\u0005r\u0000\u0000\u0111\u0112" + + "\u0005e\u0000\u0000\u0112\u0113\u0005t\u0000\u0000\u0113\u0114\u0005u" + + "\u0000\u0000\u0114\u0115\u0005r\u0000\u0000\u0115\u0116\u0005n\u0000\u0000" + + "\u0116-\u0001\u0000\u0000\u0000\u0117\u0118\u0005n\u0000\u0000\u0118\u0119" + + "\u0005e\u0000\u0000\u0119\u011a\u0005w\u0000\u0000\u011a/\u0001\u0000" + + "\u0000\u0000\u011b\u011c\u0005t\u0000\u0000\u011c\u011d\u0005r\u0000\u0000" + + "\u011d\u011e\u0005y\u0000\u0000\u011e1\u0001\u0000\u0000\u0000\u011f\u0120" + + "\u0005c\u0000\u0000\u0120\u0121\u0005a\u0000\u0000\u0121\u0122\u0005t" + + "\u0000\u0000\u0122\u0123\u0005c\u0000\u0000\u0123\u0124\u0005h\u0000\u0000" + + "\u01243\u0001\u0000\u0000\u0000\u0125\u0126\u0005t\u0000\u0000\u0126\u0127" + + "\u0005h\u0000\u0000\u0127\u0128\u0005r\u0000\u0000\u0128\u0129\u0005o" + + "\u0000\u0000\u0129\u012a\u0005w\u0000\u0000\u012a5\u0001\u0000\u0000\u0000" + + "\u012b\u012c\u0005t\u0000\u0000\u012c\u012d\u0005h\u0000\u0000\u012d\u012e" + + "\u0005i\u0000\u0000\u012e\u012f\u0005s\u0000\u0000\u012f7\u0001\u0000" + + "\u0000\u0000\u0130\u0131\u0005i\u0000\u0000\u0131\u0132\u0005n\u0000\u0000" + + "\u0132\u0133\u0005s\u0000\u0000\u0133\u0134\u0005t\u0000\u0000\u0134\u0135" + + "\u0005a\u0000\u0000\u0135\u0136\u0005n\u0000\u0000\u0136\u0137\u0005c" + + "\u0000\u0000\u0137\u0138\u0005e\u0000\u0000\u0138\u0139\u0005o\u0000\u0000" + + "\u0139\u013a\u0005f\u0000\u0000\u013a9\u0001\u0000\u0000\u0000\u013b\u013c" + + "\u0005!\u0000\u0000\u013c;\u0001\u0000\u0000\u0000\u013d\u013e\u0005~" + + "\u0000\u0000\u013e=\u0001\u0000\u0000\u0000\u013f\u0140\u0005*\u0000\u0000" + + "\u0140?\u0001\u0000\u0000\u0000\u0141\u0142\u0005/\u0000\u0000\u0142\u0143" + + "\u0004\u001f\u0000\u0000\u0143A\u0001\u0000\u0000\u0000\u0144\u0145\u0005" + + "%\u0000\u0000\u0145C\u0001\u0000\u0000\u0000\u0146\u0147\u0005+\u0000" + + "\u0000\u0147E\u0001\u0000\u0000\u0000\u0148\u0149\u0005-\u0000\u0000\u0149" + + "G\u0001\u0000\u0000\u0000\u014a\u014b\u0005<\u0000\u0000\u014b\u014c\u0005" + + "<\u0000\u0000\u014cI\u0001\u0000\u0000\u0000\u014d\u014e\u0005>\u0000" + + "\u0000\u014e\u014f\u0005>\u0000\u0000\u014fK\u0001\u0000\u0000\u0000\u0150" + + "\u0151\u0005>\u0000\u0000\u0151\u0152\u0005>\u0000\u0000\u0152\u0153\u0005" + + 
">\u0000\u0000\u0153M\u0001\u0000\u0000\u0000\u0154\u0155\u0005<\u0000" + + "\u0000\u0155O\u0001\u0000\u0000\u0000\u0156\u0157\u0005<\u0000\u0000\u0157" + + "\u0158\u0005=\u0000\u0000\u0158Q\u0001\u0000\u0000\u0000\u0159\u015a\u0005" + + ">\u0000\u0000\u015aS\u0001\u0000\u0000\u0000\u015b\u015c\u0005>\u0000" + + "\u0000\u015c\u015d\u0005=\u0000\u0000\u015dU\u0001\u0000\u0000\u0000\u015e" + + "\u015f\u0005=\u0000\u0000\u015f\u0160\u0005=\u0000\u0000\u0160W\u0001" + + "\u0000\u0000\u0000\u0161\u0162\u0005=\u0000\u0000\u0162\u0163\u0005=\u0000" + + "\u0000\u0163\u0164\u0005=\u0000\u0000\u0164Y\u0001\u0000\u0000\u0000\u0165" + + "\u0166\u0005!\u0000\u0000\u0166\u0167\u0005=\u0000\u0000\u0167[\u0001" + + "\u0000\u0000\u0000\u0168\u0169\u0005!\u0000\u0000\u0169\u016a\u0005=\u0000" + + "\u0000\u016a\u016b\u0005=\u0000\u0000\u016b]\u0001\u0000\u0000\u0000\u016c" + + "\u016d\u0005&\u0000\u0000\u016d_\u0001\u0000\u0000\u0000\u016e\u016f\u0005" + + "^\u0000\u0000\u016fa\u0001\u0000\u0000\u0000\u0170\u0171\u0005|\u0000" + + "\u0000\u0171c\u0001\u0000\u0000\u0000\u0172\u0173\u0005&\u0000\u0000\u0173" + + "\u0174\u0005&\u0000\u0000\u0174e\u0001\u0000\u0000\u0000\u0175\u0176\u0005" + + "|\u0000\u0000\u0176\u0177\u0005|\u0000\u0000\u0177g\u0001\u0000\u0000" + + "\u0000\u0178\u0179\u0005?\u0000\u0000\u0179i\u0001\u0000\u0000\u0000\u017a" + + "\u017b\u0005:\u0000\u0000\u017bk\u0001\u0000\u0000\u0000\u017c\u017d\u0005" + + "?\u0000\u0000\u017d\u017e\u0005:\u0000\u0000\u017em\u0001\u0000\u0000" + + "\u0000\u017f\u0180\u0005:\u0000\u0000\u0180\u0181\u0005:\u0000\u0000\u0181" + + "o\u0001\u0000\u0000\u0000\u0182\u0183\u0005-\u0000\u0000\u0183\u0184\u0005" + + ">\u0000\u0000\u0184q\u0001\u0000\u0000\u0000\u0185\u0186\u0005=\u0000" + + "\u0000\u0186\u0187\u0005~\u0000\u0000\u0187s\u0001\u0000\u0000\u0000\u0188" + + "\u0189\u0005=\u0000\u0000\u0189\u018a\u0005=\u0000\u0000\u018a\u018b\u0005" + + "~\u0000\u0000\u018bu\u0001\u0000\u0000\u0000\u018c\u018d\u0005+\u0000" + + "\u0000\u018d\u018e\u0005+\u0000\u0000\u018ew\u0001\u0000\u0000\u0000\u018f" + + "\u0190\u0005-\u0000\u0000\u0190\u0191\u0005-\u0000\u0000\u0191y\u0001" + + "\u0000\u0000\u0000\u0192\u0193\u0005=\u0000\u0000\u0193{\u0001\u0000\u0000" + + "\u0000\u0194\u0195\u0005+\u0000\u0000\u0195\u0196\u0005=\u0000\u0000\u0196" + + "}\u0001\u0000\u0000\u0000\u0197\u0198\u0005-\u0000\u0000\u0198\u0199\u0005" + + "=\u0000\u0000\u0199\u007f\u0001\u0000\u0000\u0000\u019a\u019b\u0005*\u0000" + + "\u0000\u019b\u019c\u0005=\u0000\u0000\u019c\u0081\u0001\u0000\u0000\u0000" + + "\u019d\u019e\u0005/\u0000\u0000\u019e\u019f\u0005=\u0000\u0000\u019f\u0083" + + "\u0001\u0000\u0000\u0000\u01a0\u01a1\u0005%\u0000\u0000\u01a1\u01a2\u0005" + + "=\u0000\u0000\u01a2\u0085\u0001\u0000\u0000\u0000\u01a3\u01a4\u0005&\u0000" + + "\u0000\u01a4\u01a5\u0005=\u0000\u0000\u01a5\u0087\u0001\u0000\u0000\u0000" + + "\u01a6\u01a7\u0005^\u0000\u0000\u01a7\u01a8\u0005=\u0000\u0000\u01a8\u0089" + + "\u0001\u0000\u0000\u0000\u01a9\u01aa\u0005|\u0000\u0000\u01aa\u01ab\u0005" + + "=\u0000\u0000\u01ab\u008b\u0001\u0000\u0000\u0000\u01ac\u01ad\u0005<\u0000" + + "\u0000\u01ad\u01ae\u0005<\u0000\u0000\u01ae\u01af\u0005=\u0000\u0000\u01af" + + "\u008d\u0001\u0000\u0000\u0000\u01b0\u01b1\u0005>\u0000\u0000\u01b1\u01b2" + + "\u0005>\u0000\u0000\u01b2\u01b3\u0005=\u0000\u0000\u01b3\u008f\u0001\u0000" + + "\u0000\u0000\u01b4\u01b5\u0005>\u0000\u0000\u01b5\u01b6\u0005>\u0000\u0000" + + "\u01b6\u01b7\u0005>\u0000\u0000\u01b7\u01b8\u0005=\u0000\u0000\u01b8\u0091" + + 
"\u0001\u0000\u0000\u0000\u01b9\u01bb\u00050\u0000\u0000\u01ba\u01bc\u0007" + + "\u0002\u0000\u0000\u01bb\u01ba\u0001\u0000\u0000\u0000\u01bc\u01bd\u0001" + + "\u0000\u0000\u0000\u01bd\u01bb\u0001\u0000\u0000\u0000\u01bd\u01be\u0001" + + "\u0000\u0000\u0000\u01be\u01c0\u0001\u0000\u0000\u0000\u01bf\u01c1\u0007" + + "\u0003\u0000\u0000\u01c0\u01bf\u0001\u0000\u0000\u0000\u01c0\u01c1\u0001" + + "\u0000\u0000\u0000\u01c1\u0093\u0001\u0000\u0000\u0000\u01c2\u01c3\u0005" + + "0\u0000\u0000\u01c3\u01c5\u0007\u0004\u0000\u0000\u01c4\u01c6\u0007\u0005" + + "\u0000\u0000\u01c5\u01c4\u0001\u0000\u0000\u0000\u01c6\u01c7\u0001\u0000" + + "\u0000\u0000\u01c7\u01c5\u0001\u0000\u0000\u0000\u01c7\u01c8\u0001\u0000" + + "\u0000\u0000\u01c8\u01ca\u0001\u0000\u0000\u0000\u01c9\u01cb\u0007\u0003" + + "\u0000\u0000\u01ca\u01c9\u0001\u0000\u0000\u0000\u01ca\u01cb\u0001\u0000" + + "\u0000\u0000\u01cb\u0095\u0001\u0000\u0000\u0000\u01cc\u01d5\u00050\u0000" + + "\u0000\u01cd\u01d1\u0007\u0006\u0000\u0000\u01ce\u01d0\u0007\u0007\u0000" + + "\u0000\u01cf\u01ce\u0001\u0000\u0000\u0000\u01d0\u01d3\u0001\u0000\u0000" + + "\u0000\u01d1\u01cf\u0001\u0000\u0000\u0000\u01d1\u01d2\u0001\u0000\u0000" + + "\u0000\u01d2\u01d5\u0001\u0000\u0000\u0000\u01d3\u01d1\u0001\u0000\u0000" + + "\u0000\u01d4\u01cc\u0001\u0000\u0000\u0000\u01d4\u01cd\u0001\u0000\u0000" + + "\u0000\u01d5\u01d7\u0001\u0000\u0000\u0000\u01d6\u01d8\u0007\b\u0000\u0000" + + "\u01d7\u01d6\u0001\u0000\u0000\u0000\u01d7\u01d8\u0001\u0000\u0000\u0000" + + "\u01d8\u0097\u0001\u0000\u0000\u0000\u01d9\u01e2\u00050\u0000\u0000\u01da" + + "\u01de\u0007\u0006\u0000\u0000\u01db\u01dd\u0007\u0007\u0000\u0000\u01dc" + + "\u01db\u0001\u0000\u0000\u0000\u01dd\u01e0\u0001\u0000\u0000\u0000\u01de" + + "\u01dc\u0001\u0000\u0000\u0000\u01de\u01df\u0001\u0000\u0000\u0000\u01df" + + "\u01e2\u0001\u0000\u0000\u0000\u01e0\u01de\u0001\u0000\u0000\u0000\u01e1" + + "\u01d9\u0001\u0000\u0000\u0000\u01e1\u01da\u0001\u0000\u0000\u0000\u01e2" + + "\u01e9\u0001\u0000\u0000\u0000\u01e3\u01e5\u0003\u0014\t\u0000\u01e4\u01e6" + + "\u0007\u0007\u0000\u0000\u01e5\u01e4\u0001\u0000\u0000\u0000\u01e6\u01e7" + + "\u0001\u0000\u0000\u0000\u01e7\u01e5\u0001\u0000\u0000\u0000\u01e7\u01e8" + + "\u0001\u0000\u0000\u0000\u01e8\u01ea\u0001\u0000\u0000\u0000\u01e9\u01e3" + + "\u0001\u0000\u0000\u0000\u01e9\u01ea\u0001\u0000\u0000\u0000\u01ea\u01f4" + + "\u0001\u0000\u0000\u0000\u01eb\u01ed\u0007\t\u0000\u0000\u01ec\u01ee\u0007" + + "\n\u0000\u0000\u01ed\u01ec\u0001\u0000\u0000\u0000\u01ed\u01ee\u0001\u0000" + + "\u0000\u0000\u01ee\u01f0\u0001\u0000\u0000\u0000\u01ef\u01f1\u0007\u0007" + + "\u0000\u0000\u01f0\u01ef\u0001\u0000\u0000\u0000\u01f1\u01f2\u0001\u0000" + + "\u0000\u0000\u01f2\u01f0\u0001\u0000\u0000\u0000\u01f2\u01f3\u0001\u0000" + + "\u0000\u0000\u01f3\u01f5\u0001\u0000\u0000\u0000\u01f4\u01eb\u0001\u0000" + + "\u0000\u0000\u01f4\u01f5\u0001\u0000\u0000\u0000\u01f5\u01f7\u0001\u0000" + + "\u0000\u0000\u01f6\u01f8\u0007\u000b\u0000\u0000\u01f7\u01f6\u0001\u0000" + + "\u0000\u0000\u01f7\u01f8\u0001\u0000\u0000\u0000\u01f8\u0099\u0001\u0000" + + "\u0000\u0000\u01f9\u0201\u0005\"\u0000\u0000\u01fa\u01fb\u0005\\\u0000" + + "\u0000\u01fb\u0200\u0005\"\u0000\u0000\u01fc\u01fd\u0005\\\u0000\u0000" + + "\u01fd\u0200\u0005\\\u0000\u0000\u01fe\u0200\b\f\u0000\u0000\u01ff\u01fa" + + "\u0001\u0000\u0000\u0000\u01ff\u01fc\u0001\u0000\u0000\u0000\u01ff\u01fe" + + "\u0001\u0000\u0000\u0000\u0200\u0203\u0001\u0000\u0000\u0000\u0201\u0202" + + 
"\u0001\u0000\u0000\u0000\u0201\u01ff\u0001\u0000\u0000\u0000\u0202\u0204" + + "\u0001\u0000\u0000\u0000\u0203\u0201\u0001\u0000\u0000\u0000\u0204\u0212" + + "\u0005\"\u0000\u0000\u0205\u020d\u0005\'\u0000\u0000\u0206\u0207\u0005" + + "\\\u0000\u0000\u0207\u020c\u0005\'\u0000\u0000\u0208\u0209\u0005\\\u0000" + + "\u0000\u0209\u020c\u0005\\\u0000\u0000\u020a\u020c\b\r\u0000\u0000\u020b" + + "\u0206\u0001\u0000\u0000\u0000\u020b\u0208\u0001\u0000\u0000\u0000\u020b" + + "\u020a\u0001\u0000\u0000\u0000\u020c\u020f\u0001\u0000\u0000\u0000\u020d" + + "\u020e\u0001\u0000\u0000\u0000\u020d\u020b\u0001\u0000\u0000\u0000\u020e" + + "\u0210\u0001\u0000\u0000\u0000\u020f\u020d\u0001\u0000\u0000\u0000\u0210" + + "\u0212\u0005\'\u0000\u0000\u0211\u01f9\u0001\u0000\u0000\u0000\u0211\u0205" + + "\u0001\u0000\u0000\u0000\u0212\u009b\u0001\u0000\u0000\u0000\u0213\u0217" + + "\u0005/\u0000\u0000\u0214\u0215\u0005\\\u0000\u0000\u0215\u0218\b\u000e" + + "\u0000\u0000\u0216\u0218\b\u000f\u0000\u0000\u0217\u0214\u0001\u0000\u0000" + + "\u0000\u0217\u0216\u0001\u0000\u0000\u0000\u0218\u0219\u0001\u0000\u0000" + + "\u0000\u0219\u021a\u0001\u0000\u0000\u0000\u0219\u0217\u0001\u0000\u0000" + + "\u0000\u021a\u021b\u0001\u0000\u0000\u0000\u021b\u021f\u0005/\u0000\u0000" + + "\u021c\u021e\u0007\u0010\u0000\u0000\u021d\u021c\u0001\u0000\u0000\u0000" + + "\u021e\u0221\u0001\u0000\u0000\u0000\u021f\u021d\u0001\u0000\u0000\u0000" + + "\u021f\u0220\u0001\u0000\u0000\u0000\u0220\u0222\u0001\u0000\u0000\u0000" + + "\u0221\u021f\u0001\u0000\u0000\u0000\u0222\u0223\u0004M\u0001\u0000\u0223" + + "\u009d\u0001\u0000\u0000\u0000\u0224\u0225\u0005t\u0000\u0000\u0225\u0226" + + "\u0005r\u0000\u0000\u0226\u0227\u0005u\u0000\u0000\u0227\u0228\u0005e" + + "\u0000\u0000\u0228\u009f\u0001\u0000\u0000\u0000\u0229\u022a\u0005f\u0000" + + "\u0000\u022a\u022b\u0005a\u0000\u0000\u022b\u022c\u0005l\u0000\u0000\u022c" + + "\u022d\u0005s\u0000\u0000\u022d\u022e\u0005e\u0000\u0000\u022e\u00a1\u0001" + + "\u0000\u0000\u0000\u022f\u0230\u0005n\u0000\u0000\u0230\u0231\u0005u\u0000" + + "\u0000\u0231\u0232\u0005l\u0000\u0000\u0232\u0233\u0005l\u0000\u0000\u0233" + + "\u00a3\u0001\u0000\u0000\u0000\u0234\u0235\u0005b\u0000\u0000\u0235\u0236" + + "\u0005o\u0000\u0000\u0236\u0237\u0005o\u0000\u0000\u0237\u0238\u0005l" + + "\u0000\u0000\u0238\u0239\u0005e\u0000\u0000\u0239\u023a\u0005a\u0000\u0000" + + "\u023a\u025b\u0005n\u0000\u0000\u023b\u023c\u0005b\u0000\u0000\u023c\u023d" + + "\u0005y\u0000\u0000\u023d\u023e\u0005t\u0000\u0000\u023e\u025b\u0005e" + + "\u0000\u0000\u023f\u0240\u0005s\u0000\u0000\u0240\u0241\u0005h\u0000\u0000" + + "\u0241\u0242\u0005o\u0000\u0000\u0242\u0243\u0005r\u0000\u0000\u0243\u025b" + + "\u0005t\u0000\u0000\u0244\u0245\u0005c\u0000\u0000\u0245\u0246\u0005h" + + "\u0000\u0000\u0246\u0247\u0005a\u0000\u0000\u0247\u025b\u0005r\u0000\u0000" + + "\u0248\u0249\u0005i\u0000\u0000\u0249\u024a\u0005n\u0000\u0000\u024a\u025b" + + "\u0005t\u0000\u0000\u024b\u024c\u0005l\u0000\u0000\u024c\u024d\u0005o" + + "\u0000\u0000\u024d\u024e\u0005n\u0000\u0000\u024e\u025b\u0005g\u0000\u0000" + + "\u024f\u0250\u0005f\u0000\u0000\u0250\u0251\u0005l\u0000\u0000\u0251\u0252" + + "\u0005o\u0000\u0000\u0252\u0253\u0005a\u0000\u0000\u0253\u025b\u0005t" + + "\u0000\u0000\u0254\u0255\u0005d\u0000\u0000\u0255\u0256\u0005o\u0000\u0000" + + "\u0256\u0257\u0005u\u0000\u0000\u0257\u0258\u0005b\u0000\u0000\u0258\u0259" + + "\u0005l\u0000\u0000\u0259\u025b\u0005e\u0000\u0000\u025a\u0234\u0001\u0000" + + 
"\u0000\u0000\u025a\u023b\u0001\u0000\u0000\u0000\u025a\u023f\u0001\u0000" + + "\u0000\u0000\u025a\u0244\u0001\u0000\u0000\u0000\u025a\u0248\u0001\u0000" + + "\u0000\u0000\u025a\u024b\u0001\u0000\u0000\u0000\u025a\u024f\u0001\u0000" + + "\u0000\u0000\u025a\u0254\u0001\u0000\u0000\u0000\u025b\u00a5\u0001\u0000" + + "\u0000\u0000\u025c\u025d\u0005d\u0000\u0000\u025d\u025e\u0005e\u0000\u0000" + + "\u025e\u025f\u0005f\u0000\u0000\u025f\u00a7\u0001\u0000\u0000\u0000\u0260" + + "\u0264\u0007\u0011\u0000\u0000\u0261\u0263\u0007\u0012\u0000\u0000\u0262" + + "\u0261\u0001\u0000\u0000\u0000\u0263\u0266\u0001\u0000\u0000\u0000\u0264" + + "\u0262\u0001\u0000\u0000\u0000\u0264\u0265\u0001\u0000\u0000\u0000\u0265" + + "\u00a9\u0001\u0000\u0000\u0000\u0266\u0264\u0001\u0000\u0000\u0000\u0267" + + "\u0270\u00050\u0000\u0000\u0268\u026c\u0007\u0006\u0000\u0000\u0269\u026b" + + "\u0007\u0007\u0000\u0000\u026a\u0269\u0001\u0000\u0000\u0000\u026b\u026e" + + "\u0001\u0000\u0000\u0000\u026c\u026a\u0001\u0000\u0000\u0000\u026c\u026d" + + "\u0001\u0000\u0000\u0000\u026d\u0270\u0001\u0000\u0000\u0000\u026e\u026c" + + "\u0001\u0000\u0000\u0000\u026f\u0267\u0001\u0000\u0000\u0000\u026f\u0268" + + "\u0001\u0000\u0000\u0000\u0270\u0271\u0001\u0000\u0000\u0000\u0271\u0272" + + "\u0006T\u0002\u0000\u0272\u00ab\u0001\u0000\u0000\u0000\u0273\u0277\u0007" + + "\u0011\u0000\u0000\u0274\u0276\u0007\u0012\u0000\u0000\u0275\u0274\u0001" + + "\u0000\u0000\u0000\u0276\u0279\u0001\u0000\u0000\u0000\u0277\u0275\u0001" + + "\u0000\u0000\u0000\u0277\u0278\u0001\u0000\u0000\u0000\u0278\u027a\u0001" + + "\u0000\u0000\u0000\u0279\u0277\u0001\u0000\u0000\u0000\u027a\u027b\u0006" + + "U\u0002\u0000\u027b\u00ad\u0001\u0000\u0000\u0000\"\u0000\u0001\u00b1" + + "\u00bb\u00c5\u00ca\u01bd\u01c0\u01c7\u01ca\u01d1\u01d4\u01d7\u01de\u01e1" + + "\u01e7\u01e9\u01ed\u01f2\u01f4\u01f7\u01ff\u0201\u020b\u020d\u0211\u0217" + + "\u0219\u021f\u025a\u0264\u026c\u026f\u0277\u0003\u0006\u0000\u0000\u0002" + + "\u0001\u0000\u0002\u0000\u0000"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { _decisionToDFA = new DFA[_ATN.getNumberOfDecisions()]; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java index 07ab6dd43890..45d35e87fc43 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessParser.java @@ -1,18 +1,31 @@ // ANTLR GENERATED CODE: DO NOT EDIT package org.elasticsearch.painless.antlr; -import org.antlr.v4.runtime.*; -import org.antlr.v4.runtime.atn.*; +import org.antlr.v4.runtime.FailedPredicateException; +import org.antlr.v4.runtime.NoViableAltException; +import org.antlr.v4.runtime.Parser; +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.RecognitionException; +import org.antlr.v4.runtime.RuleContext; +import org.antlr.v4.runtime.RuntimeMetaData; +import org.antlr.v4.runtime.Token; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.runtime.Vocabulary; +import org.antlr.v4.runtime.VocabularyImpl; +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.ATNDeserializer; +import org.antlr.v4.runtime.atn.ParserATNSimulator; +import org.antlr.v4.runtime.atn.PredictionContextCache; import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.misc.*; -import 
org.antlr.v4.runtime.tree.*; +import org.antlr.v4.runtime.tree.ParseTreeVisitor; +import org.antlr.v4.runtime.tree.TerminalNode; import java.util.List; -@SuppressWarnings({ "all", "warnings", "unchecked", "unused", "cast" }) +@SuppressWarnings({ "all", "warnings", "unchecked", "unused", "cast", "CheckReturnValue" }) class PainlessParser extends Parser { static { - RuntimeMetaData.checkVersion("4.5.3", RuntimeMetaData.VERSION); + RuntimeMetaData.checkVersion("4.11.1", RuntimeMetaData.VERSION); } protected static final DFA[] _decisionToDFA; @@ -32,220 +45,234 @@ class PainlessParser extends Parser { RULE_refcasttype = 22, RULE_chain = 23, RULE_primary = 24, RULE_postfix = 25, RULE_postdot = 26, RULE_callinvoke = 27, RULE_fieldaccess = 28, RULE_braceaccess = 29, RULE_arrayinitializer = 30, RULE_listinitializer = 31, RULE_mapinitializer = 32, RULE_maptoken = 33, RULE_arguments = 34, RULE_argument = 35, RULE_lambda = 36, RULE_lamtype = 37, RULE_funcref = 38; - public static final String[] ruleNames = { - "source", - "function", - "parameters", - "statement", - "rstatement", - "dstatement", - "trailer", - "block", - "empty", - "initializer", - "afterthought", - "declaration", - "decltype", - "type", - "declvar", - "trap", - "noncondexpression", - "expression", - "unary", - "unarynotaddsub", - "castexpression", - "primordefcasttype", - "refcasttype", - "chain", - "primary", - "postfix", - "postdot", - "callinvoke", - "fieldaccess", - "braceaccess", - "arrayinitializer", - "listinitializer", - "mapinitializer", - "maptoken", - "arguments", - "argument", - "lambda", - "lamtype", - "funcref" }; - - private static final String[] _LITERAL_NAMES = { - null, - null, - null, - "'{'", - "'}'", - "'['", - "']'", - "'('", - "')'", - "'$'", - "'.'", - "'?.'", - "','", - "';'", - "'if'", - "'in'", - "'else'", - "'while'", - "'do'", - "'for'", - "'continue'", - "'break'", - "'return'", - "'new'", - "'try'", - "'catch'", - "'throw'", - "'this'", - "'instanceof'", - "'!'", - "'~'", - "'*'", - "'/'", - "'%'", - "'+'", - "'-'", - "'<<'", - "'>>'", - "'>>>'", - "'<'", - "'<='", - "'>'", - "'>='", - "'=='", - "'==='", - "'!='", - "'!=='", - "'&'", - "'^'", - "'|'", - "'&&'", - "'||'", - "'?'", - "':'", - "'?:'", - "'::'", - "'->'", - "'=~'", - "'==~'", - "'++'", - "'--'", - "'='", - "'+='", - "'-='", - "'*='", - "'/='", - "'%='", - "'&='", - "'^='", - "'|='", - "'<<='", - "'>>='", - "'>>>='", - null, - null, - null, - null, - null, - null, - "'true'", - "'false'", - "'null'", - null, - "'def'" }; - private static final String[] _SYMBOLIC_NAMES = { - null, - "WS", - "COMMENT", - "LBRACK", - "RBRACK", - "LBRACE", - "RBRACE", - "LP", - "RP", - "DOLLAR", - "DOT", - "NSDOT", - "COMMA", - "SEMICOLON", - "IF", - "IN", - "ELSE", - "WHILE", - "DO", - "FOR", - "CONTINUE", - "BREAK", - "RETURN", - "NEW", - "TRY", - "CATCH", - "THROW", - "THIS", - "INSTANCEOF", - "BOOLNOT", - "BWNOT", - "MUL", - "DIV", - "REM", - "ADD", - "SUB", - "LSH", - "RSH", - "USH", - "LT", - "LTE", - "GT", - "GTE", - "EQ", - "EQR", - "NE", - "NER", - "BWAND", - "XOR", - "BWOR", - "BOOLAND", - "BOOLOR", - "COND", - "COLON", - "ELVIS", - "REF", - "ARROW", - "FIND", - "MATCH", - "INCR", - "DECR", - "ASSIGN", - "AADD", - "ASUB", - "AMUL", - "ADIV", - "AREM", - "AAND", - "AXOR", - "AOR", - "ALSH", - "ARSH", - "AUSH", - "OCTAL", - "HEX", - "INTEGER", - "DECIMAL", - "STRING", - "REGEX", - "TRUE", - "FALSE", - "NULL", - "PRIMITIVE", - "DEF", - "ID", - "DOTINTEGER", - "DOTID" }; + + private static String[] makeRuleNames() { + return new String[] { + "source", + 
"function", + "parameters", + "statement", + "rstatement", + "dstatement", + "trailer", + "block", + "empty", + "initializer", + "afterthought", + "declaration", + "decltype", + "type", + "declvar", + "trap", + "noncondexpression", + "expression", + "unary", + "unarynotaddsub", + "castexpression", + "primordefcasttype", + "refcasttype", + "chain", + "primary", + "postfix", + "postdot", + "callinvoke", + "fieldaccess", + "braceaccess", + "arrayinitializer", + "listinitializer", + "mapinitializer", + "maptoken", + "arguments", + "argument", + "lambda", + "lamtype", + "funcref" }; + } + + public static final String[] ruleNames = makeRuleNames(); + + private static String[] makeLiteralNames() { + return new String[] { + null, + null, + null, + "'{'", + "'}'", + "'['", + "']'", + "'('", + "')'", + "'$'", + "'.'", + "'?.'", + "','", + "';'", + "'if'", + "'in'", + "'else'", + "'while'", + "'do'", + "'for'", + "'continue'", + "'break'", + "'return'", + "'new'", + "'try'", + "'catch'", + "'throw'", + "'this'", + "'instanceof'", + "'!'", + "'~'", + "'*'", + "'/'", + "'%'", + "'+'", + "'-'", + "'<<'", + "'>>'", + "'>>>'", + "'<'", + "'<='", + "'>'", + "'>='", + "'=='", + "'==='", + "'!='", + "'!=='", + "'&'", + "'^'", + "'|'", + "'&&'", + "'||'", + "'?'", + "':'", + "'?:'", + "'::'", + "'->'", + "'=~'", + "'==~'", + "'++'", + "'--'", + "'='", + "'+='", + "'-='", + "'*='", + "'/='", + "'%='", + "'&='", + "'^='", + "'|='", + "'<<='", + "'>>='", + "'>>>='", + null, + null, + null, + null, + null, + null, + "'true'", + "'false'", + "'null'", + null, + "'def'" }; + } + + private static final String[] _LITERAL_NAMES = makeLiteralNames(); + + private static String[] makeSymbolicNames() { + return new String[] { + null, + "WS", + "COMMENT", + "LBRACK", + "RBRACK", + "LBRACE", + "RBRACE", + "LP", + "RP", + "DOLLAR", + "DOT", + "NSDOT", + "COMMA", + "SEMICOLON", + "IF", + "IN", + "ELSE", + "WHILE", + "DO", + "FOR", + "CONTINUE", + "BREAK", + "RETURN", + "NEW", + "TRY", + "CATCH", + "THROW", + "THIS", + "INSTANCEOF", + "BOOLNOT", + "BWNOT", + "MUL", + "DIV", + "REM", + "ADD", + "SUB", + "LSH", + "RSH", + "USH", + "LT", + "LTE", + "GT", + "GTE", + "EQ", + "EQR", + "NE", + "NER", + "BWAND", + "XOR", + "BWOR", + "BOOLAND", + "BOOLOR", + "COND", + "COLON", + "ELVIS", + "REF", + "ARROW", + "FIND", + "MATCH", + "INCR", + "DECR", + "ASSIGN", + "AADD", + "ASUB", + "AMUL", + "ADIV", + "AREM", + "AAND", + "AXOR", + "AOR", + "ALSH", + "ARSH", + "AUSH", + "OCTAL", + "HEX", + "INTEGER", + "DECIMAL", + "STRING", + "REGEX", + "TRUE", + "FALSE", + "NULL", + "PRIMITIVE", + "DEF", + "ID", + "DOTINTEGER", + "DOTID" }; + } + + private static final String[] _SYMBOLIC_NAMES = makeSymbolicNames(); public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); /** @@ -281,7 +308,7 @@ public Vocabulary getVocabulary() { @Override public String getGrammarFileName() { - return "PainlessParser.g4"; + return "java-escape"; } @Override @@ -304,6 +331,7 @@ public PainlessParser(TokenStream input) { _interp = new ParserATNSimulator(this, _ATN, _decisionToDFA, _sharedContextCache); } + @SuppressWarnings("CheckReturnValue") public static class SourceContext extends ParserRuleContext { public TerminalNode EOF() { return getToken(PainlessParser.EOF, 0); @@ -368,14 +396,8 @@ public final SourceContext source() throws RecognitionException { setState(87); _errHandler.sync(this); _la = _input.LA(1); - while ((((_la) & ~0x3f) == 0 - && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << DOLLAR) | (1L << IF) | (1L << 
WHILE) | (1L << DO) | (1L << FOR) - | (1L << CONTINUE) | (1L << BREAK) | (1L << RETURN) | (1L << NEW) | (1L << TRY) | (1L << THROW) | (1L << BOOLNOT) - | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) - || ((((_la - 73)) & ~0x3f) == 0 - && ((1L << (_la - 73)) & ((1L << (OCTAL - 73)) | (1L << (HEX - 73)) | (1L << (INTEGER - 73)) | (1L << (DECIMAL - - 73)) | (1L << (STRING - 73)) | (1L << (REGEX - 73)) | (1L << (TRUE - 73)) | (1L << (FALSE - 73)) | (1L - << (NULL - 73)) | (1L << (PRIMITIVE - 73)) | (1L << (DEF - 73)) | (1L << (ID - 73)))) != 0)) { + while (((_la) & ~0x3f) == 0 && ((1L << _la) & 1729382310161040032L) != 0 + || (((_la - 73)) & ~0x3f) == 0 && ((1L << (_la - 73)) & 4095L) != 0) { { { setState(84); @@ -399,6 +421,7 @@ public final SourceContext source() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class FunctionContext extends ParserRuleContext { public DecltypeContext decltype() { return getRuleContext(DecltypeContext.class, 0); @@ -457,6 +480,7 @@ public final FunctionContext function() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class ParametersContext extends ParserRuleContext { public TerminalNode LP() { return getToken(PainlessParser.LP, 0); @@ -516,9 +540,9 @@ public final ParametersContext parameters() throws RecognitionException { setState(97); match(LP); setState(109); + _errHandler.sync(this); _la = _input.LA(1); - if (((((_la - 82)) & ~0x3f) == 0 - && ((1L << (_la - 82)) & ((1L << (PRIMITIVE - 82)) | (1L << (DEF - 82)) | (1L << (ID - 82)))) != 0)) { + if ((((_la - 82)) & ~0x3f) == 0 && ((1L << (_la - 82)) & 7L) != 0) { { setState(98); decltype(); @@ -558,6 +582,7 @@ public final ParametersContext parameters() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class StatementContext extends ParserRuleContext { public RstatementContext rstatement() { return getRuleContext(RstatementContext.class, 0); @@ -597,6 +622,7 @@ public final StatementContext statement() throws RecognitionException { int _la; try { setState(117); + _errHandler.sync(this); switch (_input.LA(1)) { case IF: case WHILE: @@ -642,6 +668,8 @@ public final StatementContext statement() throws RecognitionException { if (!(_la == EOF || _la == SEMICOLON)) { _errHandler.recoverInline(this); } else { + if (_input.LA(1) == Token.EOF) matchedEOF = true; + _errHandler.reportMatch(this); consume(); } } @@ -659,6 +687,7 @@ public final StatementContext statement() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class RstatementContext extends ParserRuleContext { public RstatementContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); @@ -676,6 +705,7 @@ public void copyFrom(RstatementContext ctx) { } } + @SuppressWarnings("CheckReturnValue") public static class ForContext extends RstatementContext { public TerminalNode FOR() { return getToken(PainlessParser.FOR, 0); @@ -728,6 +758,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class TryContext extends RstatementContext { public TerminalNode TRY() { return getToken(PainlessParser.TRY, 0); @@ -756,6 +787,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class WhileContext extends RstatementContext { public TerminalNode WHILE() { return getToken(PainlessParser.WHILE, 
0); @@ -792,6 +824,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class IneachContext extends RstatementContext { public TerminalNode FOR() { return getToken(PainlessParser.FOR, 0); @@ -832,6 +865,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class IfContext extends RstatementContext { public TerminalNode IF() { return getToken(PainlessParser.IF, 0); @@ -872,6 +906,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class EachContext extends RstatementContext { public TerminalNode FOR() { return getToken(PainlessParser.FOR, 0); @@ -968,6 +1003,7 @@ public final RstatementContext rstatement() throws RecognitionException { setState(132); match(RP); setState(135); + _errHandler.sync(this); switch (_input.LA(1)) { case LBRACK: case LBRACE: @@ -1023,14 +1059,10 @@ public final RstatementContext rstatement() throws RecognitionException { setState(138); match(LP); setState(140); + _errHandler.sync(this); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 - && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << DOLLAR) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) - | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) - || ((((_la - 73)) & ~0x3f) == 0 - && ((1L << (_la - 73)) & ((1L << (OCTAL - 73)) | (1L << (HEX - 73)) | (1L << (INTEGER - 73)) | (1L << (DECIMAL - - 73)) | (1L << (STRING - 73)) | (1L << (REGEX - 73)) | (1L << (TRUE - 73)) | (1L << (FALSE - 73)) | (1L - << (NULL - 73)) | (1L << (PRIMITIVE - 73)) | (1L << (DEF - 73)) | (1L << (ID - 73)))) != 0)) { + if (((_la) & ~0x3f) == 0 && ((1L << _la) & 1729382310068880032L) != 0 + || (((_la - 73)) & ~0x3f) == 0 && ((1L << (_la - 73)) & 4095L) != 0) { { setState(139); initializer(); @@ -1040,14 +1072,10 @@ public final RstatementContext rstatement() throws RecognitionException { setState(142); match(SEMICOLON); setState(144); + _errHandler.sync(this); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 - && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << DOLLAR) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) - | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) - || ((((_la - 73)) & ~0x3f) == 0 - && ((1L << (_la - 73)) & ((1L << (OCTAL - 73)) | (1L << (HEX - 73)) | (1L << (INTEGER - 73)) | (1L << (DECIMAL - - 73)) | (1L << (STRING - 73)) | (1L << (REGEX - 73)) | (1L << (TRUE - 73)) | (1L << (FALSE - 73)) | (1L - << (NULL - 73)) | (1L << (ID - 73)))) != 0)) { + if (((_la) & ~0x3f) == 0 && ((1L << _la) & 1729382310068880032L) != 0 + || (((_la - 73)) & ~0x3f) == 0 && ((1L << (_la - 73)) & 2559L) != 0) { { setState(143); expression(); @@ -1057,14 +1085,10 @@ public final RstatementContext rstatement() throws RecognitionException { setState(146); match(SEMICOLON); setState(148); + _errHandler.sync(this); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 - && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << DOLLAR) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) - | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) - || ((((_la - 73)) & ~0x3f) == 0 - && ((1L << (_la - 73)) & ((1L << (OCTAL - 73)) | (1L << (HEX - 73)) | (1L << (INTEGER - 73)) | (1L << (DECIMAL - - 73)) | (1L << (STRING - 73)) | (1L << (REGEX - 73)) | (1L << (TRUE - 73)) | (1L << (FALSE - 73)) | (1L - << (NULL - 73)) | (1L << (ID - 73)))) != 0)) { + if (((_la) & ~0x3f) == 0 && ((1L << _la) & 1729382310068880032L) != 0 + || (((_la - 73)) & ~0x3f) == 0 && ((1L << (_la - 
73)) & 2559L) != 0) { { setState(147); afterthought(); @@ -1074,6 +1098,7 @@ public final RstatementContext rstatement() throws RecognitionException { setState(150); match(RP); setState(153); + _errHandler.sync(this); switch (_input.LA(1)) { case LBRACK: case LBRACE: @@ -1200,6 +1225,7 @@ public final RstatementContext rstatement() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class DstatementContext extends ParserRuleContext { public DstatementContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); @@ -1217,6 +1243,7 @@ public void copyFrom(DstatementContext ctx) { } } + @SuppressWarnings("CheckReturnValue") public static class DeclContext extends DstatementContext { public DeclarationContext declaration() { return getRuleContext(DeclarationContext.class, 0); @@ -1233,6 +1260,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class BreakContext extends DstatementContext { public TerminalNode BREAK() { return getToken(PainlessParser.BREAK, 0); @@ -1249,6 +1277,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class ThrowContext extends DstatementContext { public TerminalNode THROW() { return getToken(PainlessParser.THROW, 0); @@ -1269,6 +1298,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class ContinueContext extends DstatementContext { public TerminalNode CONTINUE() { return getToken(PainlessParser.CONTINUE, 0); @@ -1285,6 +1315,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class ExprContext extends DstatementContext { public ExpressionContext expression() { return getRuleContext(ExpressionContext.class, 0); @@ -1301,6 +1332,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class DoContext extends DstatementContext { public TerminalNode DO() { return getToken(PainlessParser.DO, 0); @@ -1337,6 +1369,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class ReturnContext extends DstatementContext { public TerminalNode RETURN() { return getToken(PainlessParser.RETURN, 0); @@ -1409,14 +1442,10 @@ public final DstatementContext dstatement() throws RecognitionException { setState(191); match(RETURN); setState(193); + _errHandler.sync(this); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 - && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << DOLLAR) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) - | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) - || ((((_la - 73)) & ~0x3f) == 0 - && ((1L << (_la - 73)) & ((1L << (OCTAL - 73)) | (1L << (HEX - 73)) | (1L << (INTEGER - 73)) | (1L << (DECIMAL - - 73)) | (1L << (STRING - 73)) | (1L << (REGEX - 73)) | (1L << (TRUE - 73)) | (1L << (FALSE - 73)) | (1L - << (NULL - 73)) | (1L << (ID - 73)))) != 0)) { + if (((_la) & ~0x3f) == 0 && ((1L << _la) & 1729382310068880032L) != 0 + || (((_la - 73)) & ~0x3f) == 0 && ((1L << (_la - 73)) & 2559L) != 0) { { setState(192); expression(); @@ -1452,6 +1481,7 @@ public final DstatementContext dstatement() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class TrailerContext extends ParserRuleContext { public BlockContext block() { return getRuleContext(BlockContext.class, 0); @@ -1482,6 +1512,7 @@ public final 
TrailerContext trailer() throws RecognitionException { enterRule(_localctx, 12, RULE_trailer); try { setState(202); + _errHandler.sync(this); switch (_input.LA(1)) { case LBRACK: enterOuterAlt(_localctx, 1); { @@ -1538,6 +1569,7 @@ public final TrailerContext trailer() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class BlockContext extends ParserRuleContext { public TerminalNode LBRACK() { return getToken(PainlessParser.LBRACK, 0); @@ -1602,15 +1634,10 @@ public final BlockContext block() throws RecognitionException { _alt = getInterpreter().adaptivePredict(_input, 16, _ctx); } setState(212); + _errHandler.sync(this); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 - && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << DOLLAR) | (1L << DO) | (1L << CONTINUE) | (1L << BREAK) | (1L - << RETURN) | (1L << NEW) | (1L << THROW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L - << INCR) | (1L << DECR))) != 0) - || ((((_la - 73)) & ~0x3f) == 0 - && ((1L << (_la - 73)) & ((1L << (OCTAL - 73)) | (1L << (HEX - 73)) | (1L << (INTEGER - 73)) | (1L << (DECIMAL - - 73)) | (1L << (STRING - 73)) | (1L << (REGEX - 73)) | (1L << (TRUE - 73)) | (1L << (FALSE - 73)) | (1L - << (NULL - 73)) | (1L << (PRIMITIVE - 73)) | (1L << (DEF - 73)) | (1L << (ID - 73)))) != 0)) { + if (((_la) & ~0x3f) == 0 && ((1L << _la) & 1729382310143591072L) != 0 + || (((_la - 73)) & ~0x3f) == 0 && ((1L << (_la - 73)) & 4095L) != 0) { { setState(211); dstatement(); @@ -1630,6 +1657,7 @@ public final BlockContext block() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class EmptyContext extends ParserRuleContext { public TerminalNode SEMICOLON() { return getToken(PainlessParser.SEMICOLON, 0); @@ -1670,6 +1698,7 @@ public final EmptyContext empty() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class InitializerContext extends ParserRuleContext { public DeclarationContext declaration() { return getRuleContext(DeclarationContext.class, 0); @@ -1725,6 +1754,7 @@ public final InitializerContext initializer() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class AfterthoughtContext extends ParserRuleContext { public ExpressionContext expression() { return getRuleContext(ExpressionContext.class, 0); @@ -1765,6 +1795,7 @@ public final AfterthoughtContext afterthought() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class DeclarationContext extends ParserRuleContext { public DecltypeContext decltype() { return getRuleContext(DecltypeContext.class, 0); @@ -1840,6 +1871,7 @@ public final DeclarationContext declaration() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class DecltypeContext extends ParserRuleContext { public TypeContext type() { return getRuleContext(TypeContext.class, 0); @@ -1915,6 +1947,7 @@ public final DecltypeContext decltype() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class TypeContext extends ParserRuleContext { public TerminalNode DEF() { return getToken(PainlessParser.DEF, 0); @@ -1966,6 +1999,7 @@ public final TypeContext type() throws RecognitionException { try { int _alt; setState(251); + _errHandler.sync(this); switch (_input.LA(1)) { case DEF: enterOuterAlt(_localctx, 1); { @@ -2016,6 +2050,7 @@ 
public final TypeContext type() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class DeclvarContext extends ParserRuleContext { public TerminalNode ID() { return getToken(PainlessParser.ID, 0); @@ -2055,6 +2090,7 @@ public final DeclvarContext declvar() throws RecognitionException { setState(253); match(ID); setState(256); + _errHandler.sync(this); _la = _input.LA(1); if (_la == ASSIGN) { { @@ -2076,6 +2112,7 @@ public final DeclvarContext declvar() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class TrapContext extends ParserRuleContext { public TerminalNode CATCH() { return getToken(PainlessParser.CATCH, 0); @@ -2146,6 +2183,7 @@ public final TrapContext trap() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class NoncondexpressionContext extends ParserRuleContext { public NoncondexpressionContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); @@ -2163,6 +2201,7 @@ public void copyFrom(NoncondexpressionContext ctx) { } } + @SuppressWarnings("CheckReturnValue") public static class SingleContext extends NoncondexpressionContext { public UnaryContext unary() { return getRuleContext(UnaryContext.class, 0); @@ -2179,6 +2218,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class CompContext extends NoncondexpressionContext { public List noncondexpression() { return getRuleContexts(NoncondexpressionContext.class); @@ -2231,6 +2271,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class BoolContext extends NoncondexpressionContext { public List noncondexpression() { return getRuleContexts(NoncondexpressionContext.class); @@ -2259,6 +2300,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class BinaryContext extends NoncondexpressionContext { public List noncondexpression() { return getRuleContexts(NoncondexpressionContext.class); @@ -2331,6 +2373,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class ElvisContext extends NoncondexpressionContext { public List noncondexpression() { return getRuleContexts(NoncondexpressionContext.class); @@ -2355,6 +2398,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class InstanceofContext extends NoncondexpressionContext { public NoncondexpressionContext noncondexpression() { return getRuleContext(NoncondexpressionContext.class, 0); @@ -2422,9 +2466,11 @@ private NoncondexpressionContext noncondexpression(int _p) throws RecognitionExc if (!(precpred(_ctx, 13))) throw new FailedPredicateException(this, "precpred(_ctx, 13)"); setState(269); _la = _input.LA(1); - if (!((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << MUL) | (1L << DIV) | (1L << REM))) != 0))) { + if (!(((_la) & ~0x3f) == 0 && ((1L << _la) & 15032385536L) != 0)) { _errHandler.recoverInline(this); } else { + if (_input.LA(1) == Token.EOF) matchedEOF = true; + _errHandler.reportMatch(this); consume(); } setState(270); @@ -2441,6 +2487,8 @@ private NoncondexpressionContext noncondexpression(int _p) throws RecognitionExc if (!(_la == ADD || _la == SUB)) { _errHandler.recoverInline(this); } else { + if (_input.LA(1) == Token.EOF) matchedEOF = true; + _errHandler.reportMatch(this); consume(); } setState(273); @@ -2457,6 
+2505,8 @@ private NoncondexpressionContext noncondexpression(int _p) throws RecognitionExc if (!(_la == FIND || _la == MATCH)) { _errHandler.recoverInline(this); } else { + if (_input.LA(1) == Token.EOF) matchedEOF = true; + _errHandler.reportMatch(this); consume(); } setState(276); @@ -2470,9 +2520,11 @@ private NoncondexpressionContext noncondexpression(int _p) throws RecognitionExc if (!(precpred(_ctx, 10))) throw new FailedPredicateException(this, "precpred(_ctx, 10)"); setState(278); _la = _input.LA(1); - if (!((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LSH) | (1L << RSH) | (1L << USH))) != 0))) { + if (!(((_la) & ~0x3f) == 0 && ((1L << _la) & 481036337152L) != 0)) { _errHandler.recoverInline(this); } else { + if (_input.LA(1) == Token.EOF) matchedEOF = true; + _errHandler.reportMatch(this); consume(); } setState(279); @@ -2486,10 +2538,11 @@ private NoncondexpressionContext noncondexpression(int _p) throws RecognitionExc if (!(precpred(_ctx, 9))) throw new FailedPredicateException(this, "precpred(_ctx, 9)"); setState(281); _la = _input.LA(1); - if (!((((_la) & ~0x3f) == 0 - && ((1L << _la) & ((1L << LT) | (1L << LTE) | (1L << GT) | (1L << GTE))) != 0))) { + if (!(((_la) & ~0x3f) == 0 && ((1L << _la) & 8246337208320L) != 0)) { _errHandler.recoverInline(this); } else { + if (_input.LA(1) == Token.EOF) matchedEOF = true; + _errHandler.reportMatch(this); consume(); } setState(282); @@ -2503,10 +2556,11 @@ private NoncondexpressionContext noncondexpression(int _p) throws RecognitionExc if (!(precpred(_ctx, 7))) throw new FailedPredicateException(this, "precpred(_ctx, 7)"); setState(284); _la = _input.LA(1); - if (!((((_la) & ~0x3f) == 0 - && ((1L << _la) & ((1L << EQ) | (1L << EQR) | (1L << NE) | (1L << NER))) != 0))) { + if (!(((_la) & ~0x3f) == 0 && ((1L << _la) & 131941395333120L) != 0)) { _errHandler.recoverInline(this); } else { + if (_input.LA(1) == Token.EOF) matchedEOF = true; + _errHandler.reportMatch(this); consume(); } setState(285); @@ -2608,6 +2662,7 @@ private NoncondexpressionContext noncondexpression(int _p) throws RecognitionExc return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class ExpressionContext extends ParserRuleContext { public ExpressionContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); @@ -2625,6 +2680,7 @@ public void copyFrom(ExpressionContext ctx) { } } + @SuppressWarnings("CheckReturnValue") public static class ConditionalContext extends ExpressionContext { public NoncondexpressionContext noncondexpression() { return getRuleContext(NoncondexpressionContext.class, 0); @@ -2657,6 +2713,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class AssignmentContext extends ExpressionContext { public NoncondexpressionContext noncondexpression() { return getRuleContext(NoncondexpressionContext.class, 0); @@ -2725,6 +2782,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class NonconditionalContext extends ExpressionContext { public NoncondexpressionContext noncondexpression() { return getRuleContext(NoncondexpressionContext.class, 0); @@ -2778,12 +2836,11 @@ public final ExpressionContext expression() throws RecognitionException { noncondexpression(0); setState(320); _la = _input.LA(1); - if (!(((((_la - 61)) & ~0x3f) == 0 - && ((1L << (_la - 61)) & ((1L << (ASSIGN - 61)) | (1L << (AADD - 61)) | (1L << (ASUB - 61)) | (1L << (AMUL - 61)) - | (1L << (ADIV - 61)) | (1L << (AREM - 61)) | (1L << 
(AAND - 61)) | (1L << (AXOR - 61)) | (1L << (AOR - 61)) - | (1L << (ALSH - 61)) | (1L << (ARSH - 61)) | (1L << (AUSH - 61)))) != 0))) { + if (!((((_la - 61)) & ~0x3f) == 0 && ((1L << (_la - 61)) & 4095L) != 0)) { _errHandler.recoverInline(this); } else { + if (_input.LA(1) == Token.EOF) matchedEOF = true; + _errHandler.reportMatch(this); consume(); } setState(321); @@ -2801,6 +2858,7 @@ public final ExpressionContext expression() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class UnaryContext extends ParserRuleContext { public UnaryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); @@ -2818,6 +2876,7 @@ public void copyFrom(UnaryContext ctx) { } } + @SuppressWarnings("CheckReturnValue") public static class NotaddsubContext extends UnaryContext { public UnarynotaddsubContext unarynotaddsub() { return getRuleContext(UnarynotaddsubContext.class, 0); @@ -2834,6 +2893,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class PreContext extends UnaryContext { public ChainContext chain() { return getRuleContext(ChainContext.class, 0); @@ -2858,6 +2918,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class AddsubContext extends UnaryContext { public UnaryContext unary() { return getRuleContext(UnaryContext.class, 0); @@ -2888,6 +2949,7 @@ public final UnaryContext unary() throws RecognitionException { int _la; try { setState(330); + _errHandler.sync(this); switch (_input.LA(1)) { case INCR: case DECR: @@ -2898,6 +2960,8 @@ public final UnaryContext unary() throws RecognitionException { if (!(_la == INCR || _la == DECR)) { _errHandler.recoverInline(this); } else { + if (_input.LA(1) == Token.EOF) matchedEOF = true; + _errHandler.reportMatch(this); consume(); } setState(326); @@ -2913,6 +2977,8 @@ public final UnaryContext unary() throws RecognitionException { if (!(_la == ADD || _la == SUB)) { _errHandler.recoverInline(this); } else { + if (_input.LA(1) == Token.EOF) matchedEOF = true; + _errHandler.reportMatch(this); consume(); } setState(328); @@ -2954,6 +3020,7 @@ public final UnaryContext unary() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class UnarynotaddsubContext extends ParserRuleContext { public UnarynotaddsubContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); @@ -2971,6 +3038,7 @@ public void copyFrom(UnarynotaddsubContext ctx) { } } + @SuppressWarnings("CheckReturnValue") public static class CastContext extends UnarynotaddsubContext { public CastexpressionContext castexpression() { return getRuleContext(CastexpressionContext.class, 0); @@ -2987,6 +3055,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class NotContext extends UnarynotaddsubContext { public UnaryContext unary() { return getRuleContext(UnaryContext.class, 0); @@ -3011,6 +3080,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class ReadContext extends UnarynotaddsubContext { public ChainContext chain() { return getRuleContext(ChainContext.class, 0); @@ -3027,6 +3097,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class PostContext extends UnarynotaddsubContext { public ChainContext chain() { return getRuleContext(ChainContext.class, 0); @@ -3076,6 +3147,8 @@ 
public final UnarynotaddsubContext unarynotaddsub() throws RecognitionException if (!(_la == INCR || _la == DECR)) { _errHandler.recoverInline(this); } else { + if (_input.LA(1) == Token.EOF) matchedEOF = true; + _errHandler.reportMatch(this); consume(); } } @@ -3088,6 +3161,8 @@ public final UnarynotaddsubContext unarynotaddsub() throws RecognitionException if (!(_la == BOOLNOT || _la == BWNOT)) { _errHandler.recoverInline(this); } else { + if (_input.LA(1) == Token.EOF) matchedEOF = true; + _errHandler.reportMatch(this); consume(); } setState(337); @@ -3112,6 +3187,7 @@ public final UnarynotaddsubContext unarynotaddsub() throws RecognitionException return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class CastexpressionContext extends ParserRuleContext { public CastexpressionContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); @@ -3129,6 +3205,7 @@ public void copyFrom(CastexpressionContext ctx) { } } + @SuppressWarnings("CheckReturnValue") public static class RefcastContext extends CastexpressionContext { public TerminalNode LP() { return getToken(PainlessParser.LP, 0); @@ -3157,6 +3234,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class PrimordefcastContext extends CastexpressionContext { public TerminalNode LP() { return getToken(PainlessParser.LP, 0); @@ -3229,6 +3307,7 @@ public final CastexpressionContext castexpression() throws RecognitionException return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class PrimordefcasttypeContext extends ParserRuleContext { public TerminalNode DEF() { return getToken(PainlessParser.DEF, 0); @@ -3268,6 +3347,8 @@ public final PrimordefcasttypeContext primordefcasttype() throws RecognitionExce if (!(_la == PRIMITIVE || _la == DEF)) { _errHandler.recoverInline(this); } else { + if (_input.LA(1) == Token.EOF) matchedEOF = true; + _errHandler.reportMatch(this); consume(); } } @@ -3281,6 +3362,7 @@ public final PrimordefcasttypeContext primordefcasttype() throws RecognitionExce return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class RefcasttypeContext extends ParserRuleContext { public TerminalNode DEF() { return getToken(PainlessParser.DEF, 0); @@ -3348,6 +3430,7 @@ public final RefcasttypeContext refcasttype() throws RecognitionException { int _la; try { setState(384); + _errHandler.sync(this); switch (_input.LA(1)) { case DEF: enterOuterAlt(_localctx, 1); { @@ -3444,6 +3527,7 @@ public final RefcasttypeContext refcasttype() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class ChainContext extends ParserRuleContext { public ChainContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); @@ -3461,6 +3545,7 @@ public void copyFrom(ChainContext ctx) { } } + @SuppressWarnings("CheckReturnValue") public static class DynamicContext extends ChainContext { public PrimaryContext primary() { return getRuleContext(PrimaryContext.class, 0); @@ -3485,6 +3570,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class NewarrayContext extends ChainContext { public ArrayinitializerContext arrayinitializer() { return getRuleContext(ArrayinitializerContext.class, 0); @@ -3550,6 +3636,7 @@ public final ChainContext chain() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class PrimaryContext extends ParserRuleContext { 
public PrimaryContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); @@ -3567,6 +3654,7 @@ public void copyFrom(PrimaryContext ctx) { } } + @SuppressWarnings("CheckReturnValue") public static class ListinitContext extends PrimaryContext { public ListinitializerContext listinitializer() { return getRuleContext(ListinitializerContext.class, 0); @@ -3583,6 +3671,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class RegexContext extends PrimaryContext { public TerminalNode REGEX() { return getToken(PainlessParser.REGEX, 0); @@ -3599,6 +3688,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class NullContext extends PrimaryContext { public TerminalNode NULL() { return getToken(PainlessParser.NULL, 0); @@ -3615,6 +3705,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class StringContext extends PrimaryContext { public TerminalNode STRING() { return getToken(PainlessParser.STRING, 0); @@ -3631,6 +3722,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class MapinitContext extends PrimaryContext { public MapinitializerContext mapinitializer() { return getRuleContext(MapinitializerContext.class, 0); @@ -3647,6 +3739,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class CalllocalContext extends PrimaryContext { public ArgumentsContext arguments() { return getRuleContext(ArgumentsContext.class, 0); @@ -3671,6 +3764,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class TrueContext extends PrimaryContext { public TerminalNode TRUE() { return getToken(PainlessParser.TRUE, 0); @@ -3687,6 +3781,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class FalseContext extends PrimaryContext { public TerminalNode FALSE() { return getToken(PainlessParser.FALSE, 0); @@ -3703,6 +3798,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class VariableContext extends PrimaryContext { public TerminalNode ID() { return getToken(PainlessParser.ID, 0); @@ -3719,6 +3815,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class NumericContext extends PrimaryContext { public TerminalNode OCTAL() { return getToken(PainlessParser.OCTAL, 0); @@ -3747,6 +3844,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class NewobjectContext extends PrimaryContext { public TerminalNode NEW() { return getToken(PainlessParser.NEW, 0); @@ -3771,6 +3869,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class PrecedenceContext extends PrimaryContext { public TerminalNode LP() { return getToken(PainlessParser.LP, 0); @@ -3819,11 +3918,11 @@ public final PrimaryContext primary() throws RecognitionException { enterOuterAlt(_localctx, 2); { setState(400); _la = _input.LA(1); - if (!(((((_la - 73)) & ~0x3f) == 0 - && ((1L << (_la - 73)) & ((1L << (OCTAL - 73)) | (1L << (HEX - 73)) | (1L << (INTEGER - 73)) | (1L << (DECIMAL - - 73)))) != 0))) { + if (!((((_la - 73)) & ~0x3f) == 0 && ((1L << (_la - 73)) & 15L) != 0)) { _errHandler.recoverInline(this); } else { + if (_input.LA(1) == Token.EOF) matchedEOF = 
true; + _errHandler.reportMatch(this); consume(); } } @@ -3892,6 +3991,8 @@ public final PrimaryContext primary() throws RecognitionException { if (!(_la == DOLLAR || _la == ID)) { _errHandler.recoverInline(this); } else { + if (_input.LA(1) == Token.EOF) matchedEOF = true; + _errHandler.reportMatch(this); consume(); } setState(410); @@ -3920,6 +4021,7 @@ public final PrimaryContext primary() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class PostfixContext extends ParserRuleContext { public CallinvokeContext callinvoke() { return getRuleContext(CallinvokeContext.class, 0); @@ -3985,6 +4087,7 @@ public final PostfixContext postfix() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class PostdotContext extends ParserRuleContext { public CallinvokeContext callinvoke() { return getRuleContext(CallinvokeContext.class, 0); @@ -4040,6 +4143,7 @@ public final PostdotContext postdot() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class CallinvokeContext extends ParserRuleContext { public TerminalNode DOTID() { return getToken(PainlessParser.DOTID, 0); @@ -4085,6 +4189,8 @@ public final CallinvokeContext callinvoke() throws RecognitionException { if (!(_la == DOT || _la == NSDOT)) { _errHandler.recoverInline(this); } else { + if (_input.LA(1) == Token.EOF) matchedEOF = true; + _errHandler.reportMatch(this); consume(); } setState(427); @@ -4102,6 +4208,7 @@ public final CallinvokeContext callinvoke() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class FieldaccessContext extends ParserRuleContext { public TerminalNode DOT() { return getToken(PainlessParser.DOT, 0); @@ -4147,6 +4254,8 @@ public final FieldaccessContext fieldaccess() throws RecognitionException { if (!(_la == DOT || _la == NSDOT)) { _errHandler.recoverInline(this); } else { + if (_input.LA(1) == Token.EOF) matchedEOF = true; + _errHandler.reportMatch(this); consume(); } setState(431); @@ -4154,6 +4263,8 @@ public final FieldaccessContext fieldaccess() throws RecognitionException { if (!(_la == DOTINTEGER || _la == DOTID)) { _errHandler.recoverInline(this); } else { + if (_input.LA(1) == Token.EOF) matchedEOF = true; + _errHandler.reportMatch(this); consume(); } } @@ -4167,6 +4278,7 @@ public final FieldaccessContext fieldaccess() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class BraceaccessContext extends ParserRuleContext { public TerminalNode LBRACE() { return getToken(PainlessParser.LBRACE, 0); @@ -4219,6 +4331,7 @@ public final BraceaccessContext braceaccess() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class ArrayinitializerContext extends ParserRuleContext { public ArrayinitializerContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); @@ -4236,6 +4349,7 @@ public void copyFrom(ArrayinitializerContext ctx) { } } + @SuppressWarnings("CheckReturnValue") public static class NewstandardarrayContext extends ArrayinitializerContext { public TerminalNode NEW() { return getToken(PainlessParser.NEW, 0); @@ -4292,6 +4406,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class NewinitializedarrayContext extends ArrayinitializerContext { public TerminalNode NEW() { return getToken(PainlessParser.NEW, 0); @@ 
-4434,14 +4549,10 @@ public final ArrayinitializerContext arrayinitializer() throws RecognitionExcept setState(460); match(LBRACK); setState(469); + _errHandler.sync(this); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 - && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << DOLLAR) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) - | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) - || ((((_la - 73)) & ~0x3f) == 0 - && ((1L << (_la - 73)) & ((1L << (OCTAL - 73)) | (1L << (HEX - 73)) | (1L << (INTEGER - 73)) | (1L << (DECIMAL - - 73)) | (1L << (STRING - 73)) | (1L << (REGEX - 73)) | (1L << (TRUE - 73)) | (1L << (FALSE - 73)) | (1L - << (NULL - 73)) | (1L << (ID - 73)))) != 0)) { + if (((_la) & ~0x3f) == 0 && ((1L << _la) & 1729382310068880032L) != 0 + || (((_la - 73)) & ~0x3f) == 0 && ((1L << (_la - 73)) & 2559L) != 0) { { setState(461); expression(); @@ -4495,6 +4606,7 @@ public final ArrayinitializerContext arrayinitializer() throws RecognitionExcept return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class ListinitializerContext extends ParserRuleContext { public TerminalNode LBRACE() { return getToken(PainlessParser.LBRACE, 0); @@ -4589,6 +4701,7 @@ public final ListinitializerContext listinitializer() throws RecognitionExceptio return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class MapinitializerContext extends ParserRuleContext { public TerminalNode LBRACE() { return getToken(PainlessParser.LBRACE, 0); @@ -4689,6 +4802,7 @@ public final MapinitializerContext mapinitializer() throws RecognitionException return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class MaptokenContext extends ParserRuleContext { public List expression() { return getRuleContexts(ExpressionContext.class); @@ -4741,6 +4855,7 @@ public final MaptokenContext maptoken() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class ArgumentsContext extends ParserRuleContext { public TerminalNode LP() { return getToken(PainlessParser.LP, 0); @@ -4793,14 +4908,10 @@ public final ArgumentsContext arguments() throws RecognitionException { setState(515); match(LP); setState(524); + _errHandler.sync(this); _la = _input.LA(1); - if ((((_la) & ~0x3f) == 0 - && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << DOLLAR) | (1L << NEW) | (1L << THIS) | (1L << BOOLNOT) | (1L - << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) - || ((((_la - 73)) & ~0x3f) == 0 - && ((1L << (_la - 73)) & ((1L << (OCTAL - 73)) | (1L << (HEX - 73)) | (1L << (INTEGER - 73)) | (1L << (DECIMAL - - 73)) | (1L << (STRING - 73)) | (1L << (REGEX - 73)) | (1L << (TRUE - 73)) | (1L << (FALSE - 73)) | (1L - << (NULL - 73)) | (1L << (PRIMITIVE - 73)) | (1L << (DEF - 73)) | (1L << (ID - 73)))) != 0)) { + if (((_la) & ~0x3f) == 0 && ((1L << _la) & 1729382310203097760L) != 0 + || (((_la - 73)) & ~0x3f) == 0 && ((1L << (_la - 73)) & 4095L) != 0) { { setState(516); argument(); @@ -4837,6 +4948,7 @@ public final ArgumentsContext arguments() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class ArgumentContext extends ParserRuleContext { public ExpressionContext expression() { return getRuleContext(ExpressionContext.class, 0); @@ -4902,6 +5014,7 @@ public final ArgumentContext argument() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class LambdaContext extends ParserRuleContext { public 
TerminalNode ARROW() { return getToken(PainlessParser.ARROW, 0); @@ -4963,6 +5076,7 @@ public final LambdaContext lambda() throws RecognitionException { enterOuterAlt(_localctx, 1); { setState(546); + _errHandler.sync(this); switch (_input.LA(1)) { case PRIMITIVE: case DEF: @@ -4975,9 +5089,9 @@ public final LambdaContext lambda() throws RecognitionException { setState(534); match(LP); setState(543); + _errHandler.sync(this); _la = _input.LA(1); - if (((((_la - 82)) & ~0x3f) == 0 - && ((1L << (_la - 82)) & ((1L << (PRIMITIVE - 82)) | (1L << (DEF - 82)) | (1L << (ID - 82)))) != 0)) { + if ((((_la - 82)) & ~0x3f) == 0 && ((1L << (_la - 82)) & 7L) != 0) { { setState(535); lamtype(); @@ -5010,6 +5124,7 @@ public final LambdaContext lambda() throws RecognitionException { setState(548); match(ARROW); setState(551); + _errHandler.sync(this); switch (_input.LA(1)) { case LBRACK: { setState(549); @@ -5054,6 +5169,7 @@ public final LambdaContext lambda() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class LamtypeContext extends ParserRuleContext { public TerminalNode ID() { return getToken(PainlessParser.ID, 0); @@ -5107,6 +5223,7 @@ public final LamtypeContext lamtype() throws RecognitionException { return _localctx; } + @SuppressWarnings("CheckReturnValue") public static class FuncrefContext extends ParserRuleContext { public FuncrefContext(ParserRuleContext parent, int invokingState) { super(parent, invokingState); @@ -5124,6 +5241,7 @@ public void copyFrom(FuncrefContext ctx) { } } + @SuppressWarnings("CheckReturnValue") public static class ClassfuncrefContext extends FuncrefContext { public DecltypeContext decltype() { return getRuleContext(DecltypeContext.class, 0); @@ -5148,6 +5266,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class ConstructorfuncrefContext extends FuncrefContext { public DecltypeContext decltype() { return getRuleContext(DecltypeContext.class, 0); @@ -5174,6 +5293,7 @@ public T accept(ParseTreeVisitor visitor) { } } + @SuppressWarnings("CheckReturnValue") public static class LocalfuncrefContext extends FuncrefContext { public TerminalNode THIS() { return getToken(PainlessParser.THIS, 0); @@ -5299,224 +5419,395 @@ private boolean noncondexpression_sempred(NoncondexpressionContext _localctx, in return true; } - public static final String _serializedATN = "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3X\u023e\4\2\t\2\4" - + "\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t" - + "\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22" - + "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31" - + "\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t \4!" 
- + "\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\3\2\7\2R\n\2\f\2\16" - + "\2U\13\2\3\2\7\2X\n\2\f\2\16\2[\13\2\3\2\3\2\3\3\3\3\3\3\3\3\3\3\3\4\3" - + "\4\3\4\3\4\3\4\3\4\3\4\7\4k\n\4\f\4\16\4n\13\4\5\4p\n\4\3\4\3\4\3\5\3" - + "\5\3\5\3\5\5\5x\n\5\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\5\6\u0082\n\6\3\6" - + "\3\6\3\6\3\6\3\6\3\6\5\6\u008a\n\6\3\6\3\6\3\6\5\6\u008f\n\6\3\6\3\6\5" - + "\6\u0093\n\6\3\6\3\6\5\6\u0097\n\6\3\6\3\6\3\6\5\6\u009c\n\6\3\6\3\6\3" - + "\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6" - + "\6\6\u00b2\n\6\r\6\16\6\u00b3\5\6\u00b6\n\6\3\7\3\7\3\7\3\7\3\7\3\7\3" - + "\7\3\7\3\7\3\7\3\7\3\7\5\7\u00c4\n\7\3\7\3\7\3\7\5\7\u00c9\n\7\3\b\3\b" - + "\5\b\u00cd\n\b\3\t\3\t\7\t\u00d1\n\t\f\t\16\t\u00d4\13\t\3\t\5\t\u00d7" - + "\n\t\3\t\3\t\3\n\3\n\3\13\3\13\5\13\u00df\n\13\3\f\3\f\3\r\3\r\3\r\3\r" - + "\7\r\u00e7\n\r\f\r\16\r\u00ea\13\r\3\16\3\16\3\16\7\16\u00ef\n\16\f\16" - + "\16\16\u00f2\13\16\3\17\3\17\3\17\3\17\3\17\7\17\u00f9\n\17\f\17\16\17" - + "\u00fc\13\17\5\17\u00fe\n\17\3\20\3\20\3\20\5\20\u0103\n\20\3\21\3\21" - + "\3\21\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22" - + "\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22" - + "\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22" - + "\3\22\3\22\3\22\3\22\3\22\7\22\u0136\n\22\f\22\16\22\u0139\13\22\3\23" - + "\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\5\23\u0146\n\23\3\24" - + "\3\24\3\24\3\24\3\24\5\24\u014d\n\24\3\25\3\25\3\25\3\25\3\25\3\25\3\25" - + "\5\25\u0156\n\25\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\26\5\26" - + "\u0162\n\26\3\27\3\27\3\30\3\30\3\30\6\30\u0169\n\30\r\30\16\30\u016a" - + "\3\30\3\30\3\30\6\30\u0170\n\30\r\30\16\30\u0171\3\30\3\30\3\30\7\30\u0177" - + "\n\30\f\30\16\30\u017a\13\30\3\30\3\30\7\30\u017e\n\30\f\30\16\30\u0181" - + "\13\30\5\30\u0183\n\30\3\31\3\31\7\31\u0187\n\31\f\31\16\31\u018a\13\31" - + "\3\31\5\31\u018d\n\31\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32" - + "\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\5\32\u01a2\n\32\3\33\3\33" - + "\3\33\5\33\u01a7\n\33\3\34\3\34\5\34\u01ab\n\34\3\35\3\35\3\35\3\35\3" - + "\36\3\36\3\36\3\37\3\37\3\37\3\37\3 \3 \3 \3 \3 \3 \6 \u01be\n \r \16" - + " \u01bf\3 \3 \7 \u01c4\n \f \16 \u01c7\13 \5 \u01c9\n \3 \3 \3 \3 \3 " - + "\3 \3 \3 \7 \u01d3\n \f \16 \u01d6\13 \5 \u01d8\n \3 \3 \7 \u01dc\n \f" - + " \16 \u01df\13 \5 \u01e1\n \3!\3!\3!\3!\7!\u01e7\n!\f!\16!\u01ea\13!\3" - + "!\3!\3!\3!\5!\u01f0\n!\3\"\3\"\3\"\3\"\7\"\u01f6\n\"\f\"\16\"\u01f9\13" - + "\"\3\"\3\"\3\"\3\"\3\"\5\"\u0200\n\"\3#\3#\3#\3#\3$\3$\3$\3$\7$\u020a" - + "\n$\f$\16$\u020d\13$\5$\u020f\n$\3$\3$\3%\3%\3%\5%\u0216\n%\3&\3&\3&\3" - + "&\3&\7&\u021d\n&\f&\16&\u0220\13&\5&\u0222\n&\3&\5&\u0225\n&\3&\3&\3&" - + "\5&\u022a\n&\3\'\5\'\u022d\n\'\3\'\3\'\3(\3(\3(\3(\3(\3(\3(\3(\3(\3(\3" - + "(\5(\u023c\n(\3(\2\3\")\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&(*" - + ",.\60\62\64\668:<>@BDFHJLN\2\21\3\3\17\17\3\2!#\3\2$%\3\2;<\3\2&(\3\2" - + "),\3\2-\60\3\2?J\3\2=>\3\2\37 \3\2TU\3\2KN\4\2\13\13VV\3\2\f\r\3\2WX\u0279" - + "\2S\3\2\2\2\4^\3\2\2\2\6c\3\2\2\2\bw\3\2\2\2\n\u00b5\3\2\2\2\f\u00c8\3" - + "\2\2\2\16\u00cc\3\2\2\2\20\u00ce\3\2\2\2\22\u00da\3\2\2\2\24\u00de\3\2" - + "\2\2\26\u00e0\3\2\2\2\30\u00e2\3\2\2\2\32\u00eb\3\2\2\2\34\u00fd\3\2\2" - + "\2\36\u00ff\3\2\2\2 \u0104\3\2\2\2\"\u010b\3\2\2\2$\u0145\3\2\2\2&\u014c" - + "\3\2\2\2(\u0155\3\2\2\2*\u0161\3\2\2\2,\u0163\3\2\2\2.\u0182\3\2\2\2\60" - + "\u018c\3\2\2\2\62\u01a1\3\2\2\2\64\u01a6\3\2\2\2\66\u01aa\3\2\2\28\u01ac" - + 
"\3\2\2\2:\u01b0\3\2\2\2<\u01b3\3\2\2\2>\u01e0\3\2\2\2@\u01ef\3\2\2\2B" - + "\u01ff\3\2\2\2D\u0201\3\2\2\2F\u0205\3\2\2\2H\u0215\3\2\2\2J\u0224\3\2" - + "\2\2L\u022c\3\2\2\2N\u023b\3\2\2\2PR\5\4\3\2QP\3\2\2\2RU\3\2\2\2SQ\3\2" - + "\2\2ST\3\2\2\2TY\3\2\2\2US\3\2\2\2VX\5\b\5\2WV\3\2\2\2X[\3\2\2\2YW\3\2" - + "\2\2YZ\3\2\2\2Z\\\3\2\2\2[Y\3\2\2\2\\]\7\2\2\3]\3\3\2\2\2^_\5\32\16\2" - + "_`\7V\2\2`a\5\6\4\2ab\5\20\t\2b\5\3\2\2\2co\7\t\2\2de\5\32\16\2el\7V\2" - + "\2fg\7\16\2\2gh\5\32\16\2hi\7V\2\2ik\3\2\2\2jf\3\2\2\2kn\3\2\2\2lj\3\2" - + "\2\2lm\3\2\2\2mp\3\2\2\2nl\3\2\2\2od\3\2\2\2op\3\2\2\2pq\3\2\2\2qr\7\n" - + "\2\2r\7\3\2\2\2sx\5\n\6\2tu\5\f\7\2uv\t\2\2\2vx\3\2\2\2ws\3\2\2\2wt\3" - + "\2\2\2x\t\3\2\2\2yz\7\20\2\2z{\7\t\2\2{|\5$\23\2|}\7\n\2\2}\u0081\5\16" - + "\b\2~\177\7\22\2\2\177\u0082\5\16\b\2\u0080\u0082\6\6\2\2\u0081~\3\2\2" - + "\2\u0081\u0080\3\2\2\2\u0082\u00b6\3\2\2\2\u0083\u0084\7\23\2\2\u0084" - + "\u0085\7\t\2\2\u0085\u0086\5$\23\2\u0086\u0089\7\n\2\2\u0087\u008a\5\16" - + "\b\2\u0088\u008a\5\22\n\2\u0089\u0087\3\2\2\2\u0089\u0088\3\2\2\2\u008a" - + "\u00b6\3\2\2\2\u008b\u008c\7\25\2\2\u008c\u008e\7\t\2\2\u008d\u008f\5" - + "\24\13\2\u008e\u008d\3\2\2\2\u008e\u008f\3\2\2\2\u008f\u0090\3\2\2\2\u0090" - + "\u0092\7\17\2\2\u0091\u0093\5$\23\2\u0092\u0091\3\2\2\2\u0092\u0093\3" - + "\2\2\2\u0093\u0094\3\2\2\2\u0094\u0096\7\17\2\2\u0095\u0097\5\26\f\2\u0096" - + "\u0095\3\2\2\2\u0096\u0097\3\2\2\2\u0097\u0098\3\2\2\2\u0098\u009b\7\n" - + "\2\2\u0099\u009c\5\16\b\2\u009a\u009c\5\22\n\2\u009b\u0099\3\2\2\2\u009b" - + "\u009a\3\2\2\2\u009c\u00b6\3\2\2\2\u009d\u009e\7\25\2\2\u009e\u009f\7" - + "\t\2\2\u009f\u00a0\5\32\16\2\u00a0\u00a1\7V\2\2\u00a1\u00a2\7\67\2\2\u00a2" - + "\u00a3\5$\23\2\u00a3\u00a4\7\n\2\2\u00a4\u00a5\5\16\b\2\u00a5\u00b6\3" - + "\2\2\2\u00a6\u00a7\7\25\2\2\u00a7\u00a8\7\t\2\2\u00a8\u00a9\7V\2\2\u00a9" - + "\u00aa\7\21\2\2\u00aa\u00ab\5$\23\2\u00ab\u00ac\7\n\2\2\u00ac\u00ad\5" - + "\16\b\2\u00ad\u00b6\3\2\2\2\u00ae\u00af\7\32\2\2\u00af\u00b1\5\20\t\2" - + "\u00b0\u00b2\5 \21\2\u00b1\u00b0\3\2\2\2\u00b2\u00b3\3\2\2\2\u00b3\u00b1" - + "\3\2\2\2\u00b3\u00b4\3\2\2\2\u00b4\u00b6\3\2\2\2\u00b5y\3\2\2\2\u00b5" - + "\u0083\3\2\2\2\u00b5\u008b\3\2\2\2\u00b5\u009d\3\2\2\2\u00b5\u00a6\3\2" - + "\2\2\u00b5\u00ae\3\2\2\2\u00b6\13\3\2\2\2\u00b7\u00b8\7\24\2\2\u00b8\u00b9" - + "\5\20\t\2\u00b9\u00ba\7\23\2\2\u00ba\u00bb\7\t\2\2\u00bb\u00bc\5$\23\2" - + "\u00bc\u00bd\7\n\2\2\u00bd\u00c9\3\2\2\2\u00be\u00c9\5\30\r\2\u00bf\u00c9" - + "\7\26\2\2\u00c0\u00c9\7\27\2\2\u00c1\u00c3\7\30\2\2\u00c2\u00c4\5$\23" - + "\2\u00c3\u00c2\3\2\2\2\u00c3\u00c4\3\2\2\2\u00c4\u00c9\3\2\2\2\u00c5\u00c6" - + "\7\34\2\2\u00c6\u00c9\5$\23\2\u00c7\u00c9\5$\23\2\u00c8\u00b7\3\2\2\2" - + "\u00c8\u00be\3\2\2\2\u00c8\u00bf\3\2\2\2\u00c8\u00c0\3\2\2\2\u00c8\u00c1" - + "\3\2\2\2\u00c8\u00c5\3\2\2\2\u00c8\u00c7\3\2\2\2\u00c9\r\3\2\2\2\u00ca" - + "\u00cd\5\20\t\2\u00cb\u00cd\5\b\5\2\u00cc\u00ca\3\2\2\2\u00cc\u00cb\3" - + "\2\2\2\u00cd\17\3\2\2\2\u00ce\u00d2\7\5\2\2\u00cf\u00d1\5\b\5\2\u00d0" - + "\u00cf\3\2\2\2\u00d1\u00d4\3\2\2\2\u00d2\u00d0\3\2\2\2\u00d2\u00d3\3\2" - + "\2\2\u00d3\u00d6\3\2\2\2\u00d4\u00d2\3\2\2\2\u00d5\u00d7\5\f\7\2\u00d6" - + "\u00d5\3\2\2\2\u00d6\u00d7\3\2\2\2\u00d7\u00d8\3\2\2\2\u00d8\u00d9\7\6" - + "\2\2\u00d9\21\3\2\2\2\u00da\u00db\7\17\2\2\u00db\23\3\2\2\2\u00dc\u00df" - + "\5\30\r\2\u00dd\u00df\5$\23\2\u00de\u00dc\3\2\2\2\u00de\u00dd\3\2\2\2" - + "\u00df\25\3\2\2\2\u00e0\u00e1\5$\23\2\u00e1\27\3\2\2\2\u00e2\u00e3\5\32" - + 
"\16\2\u00e3\u00e8\5\36\20\2\u00e4\u00e5\7\16\2\2\u00e5\u00e7\5\36\20\2" - + "\u00e6\u00e4\3\2\2\2\u00e7\u00ea\3\2\2\2\u00e8\u00e6\3\2\2\2\u00e8\u00e9" - + "\3\2\2\2\u00e9\31\3\2\2\2\u00ea\u00e8\3\2\2\2\u00eb\u00f0\5\34\17\2\u00ec" - + "\u00ed\7\7\2\2\u00ed\u00ef\7\b\2\2\u00ee\u00ec\3\2\2\2\u00ef\u00f2\3\2" - + "\2\2\u00f0\u00ee\3\2\2\2\u00f0\u00f1\3\2\2\2\u00f1\33\3\2\2\2\u00f2\u00f0" - + "\3\2\2\2\u00f3\u00fe\7U\2\2\u00f4\u00fe\7T\2\2\u00f5\u00fa\7V\2\2\u00f6" - + "\u00f7\7\f\2\2\u00f7\u00f9\7X\2\2\u00f8\u00f6\3\2\2\2\u00f9\u00fc\3\2" - + "\2\2\u00fa\u00f8\3\2\2\2\u00fa\u00fb\3\2\2\2\u00fb\u00fe\3\2\2\2\u00fc" - + "\u00fa\3\2\2\2\u00fd\u00f3\3\2\2\2\u00fd\u00f4\3\2\2\2\u00fd\u00f5\3\2" - + "\2\2\u00fe\35\3\2\2\2\u00ff\u0102\7V\2\2\u0100\u0101\7?\2\2\u0101\u0103" - + "\5$\23\2\u0102\u0100\3\2\2\2\u0102\u0103\3\2\2\2\u0103\37\3\2\2\2\u0104" - + "\u0105\7\33\2\2\u0105\u0106\7\t\2\2\u0106\u0107\5\34\17\2\u0107\u0108" - + "\7V\2\2\u0108\u0109\7\n\2\2\u0109\u010a\5\20\t\2\u010a!\3\2\2\2\u010b" - + "\u010c\b\22\1\2\u010c\u010d\5&\24\2\u010d\u0137\3\2\2\2\u010e\u010f\f" - + "\17\2\2\u010f\u0110\t\3\2\2\u0110\u0136\5\"\22\20\u0111\u0112\f\16\2\2" - + "\u0112\u0113\t\4\2\2\u0113\u0136\5\"\22\17\u0114\u0115\f\r\2\2\u0115\u0116" - + "\t\5\2\2\u0116\u0136\5\"\22\16\u0117\u0118\f\f\2\2\u0118\u0119\t\6\2\2" - + "\u0119\u0136\5\"\22\r\u011a\u011b\f\13\2\2\u011b\u011c\t\7\2\2\u011c\u0136" - + "\5\"\22\f\u011d\u011e\f\t\2\2\u011e\u011f\t\b\2\2\u011f\u0136\5\"\22\n" - + "\u0120\u0121\f\b\2\2\u0121\u0122\7\61\2\2\u0122\u0136\5\"\22\t\u0123\u0124" - + "\f\7\2\2\u0124\u0125\7\62\2\2\u0125\u0136\5\"\22\b\u0126\u0127\f\6\2\2" - + "\u0127\u0128\7\63\2\2\u0128\u0136\5\"\22\7\u0129\u012a\f\5\2\2\u012a\u012b" - + "\7\64\2\2\u012b\u0136\5\"\22\6\u012c\u012d\f\4\2\2\u012d\u012e\7\65\2" - + "\2\u012e\u0136\5\"\22\5\u012f\u0130\f\3\2\2\u0130\u0131\78\2\2\u0131\u0136" - + "\5\"\22\3\u0132\u0133\f\n\2\2\u0133\u0134\7\36\2\2\u0134\u0136\5\32\16" - + "\2\u0135\u010e\3\2\2\2\u0135\u0111\3\2\2\2\u0135\u0114\3\2\2\2\u0135\u0117" - + "\3\2\2\2\u0135\u011a\3\2\2\2\u0135\u011d\3\2\2\2\u0135\u0120\3\2\2\2\u0135" - + "\u0123\3\2\2\2\u0135\u0126\3\2\2\2\u0135\u0129\3\2\2\2\u0135\u012c\3\2" - + "\2\2\u0135\u012f\3\2\2\2\u0135\u0132\3\2\2\2\u0136\u0139\3\2\2\2\u0137" - + "\u0135\3\2\2\2\u0137\u0138\3\2\2\2\u0138#\3\2\2\2\u0139\u0137\3\2\2\2" - + "\u013a\u0146\5\"\22\2\u013b\u013c\5\"\22\2\u013c\u013d\7\66\2\2\u013d" - + "\u013e\5$\23\2\u013e\u013f\7\67\2\2\u013f\u0140\5$\23\2\u0140\u0146\3" - + "\2\2\2\u0141\u0142\5\"\22\2\u0142\u0143\t\t\2\2\u0143\u0144\5$\23\2\u0144" - + "\u0146\3\2\2\2\u0145\u013a\3\2\2\2\u0145\u013b\3\2\2\2\u0145\u0141\3\2" - + "\2\2\u0146%\3\2\2\2\u0147\u0148\t\n\2\2\u0148\u014d\5\60\31\2\u0149\u014a" - + "\t\4\2\2\u014a\u014d\5&\24\2\u014b\u014d\5(\25\2\u014c\u0147\3\2\2\2\u014c" - + "\u0149\3\2\2\2\u014c\u014b\3\2\2\2\u014d\'\3\2\2\2\u014e\u0156\5\60\31" - + "\2\u014f\u0150\5\60\31\2\u0150\u0151\t\n\2\2\u0151\u0156\3\2\2\2\u0152" - + "\u0153\t\13\2\2\u0153\u0156\5&\24\2\u0154\u0156\5*\26\2\u0155\u014e\3" - + "\2\2\2\u0155\u014f\3\2\2\2\u0155\u0152\3\2\2\2\u0155\u0154\3\2\2\2\u0156" - + ")\3\2\2\2\u0157\u0158\7\t\2\2\u0158\u0159\5,\27\2\u0159\u015a\7\n\2\2" - + "\u015a\u015b\5&\24\2\u015b\u0162\3\2\2\2\u015c\u015d\7\t\2\2\u015d\u015e" - + "\5.\30\2\u015e\u015f\7\n\2\2\u015f\u0160\5(\25\2\u0160\u0162\3\2\2\2\u0161" - + "\u0157\3\2\2\2\u0161\u015c\3\2\2\2\u0162+\3\2\2\2\u0163\u0164\t\f\2\2" - + "\u0164-\3\2\2\2\u0165\u0168\7U\2\2\u0166\u0167\7\7\2\2\u0167\u0169\7\b" - + 
"\2\2\u0168\u0166\3\2\2\2\u0169\u016a\3\2\2\2\u016a\u0168\3\2\2\2\u016a" - + "\u016b\3\2\2\2\u016b\u0183\3\2\2\2\u016c\u016f\7T\2\2\u016d\u016e\7\7" - + "\2\2\u016e\u0170\7\b\2\2\u016f\u016d\3\2\2\2\u0170\u0171\3\2\2\2\u0171" - + "\u016f\3\2\2\2\u0171\u0172\3\2\2\2\u0172\u0183\3\2\2\2\u0173\u0178\7V" - + "\2\2\u0174\u0175\7\f\2\2\u0175\u0177\7X\2\2\u0176\u0174\3\2\2\2\u0177" - + "\u017a\3\2\2\2\u0178\u0176\3\2\2\2\u0178\u0179\3\2\2\2\u0179\u017f\3\2" - + "\2\2\u017a\u0178\3\2\2\2\u017b\u017c\7\7\2\2\u017c\u017e\7\b\2\2\u017d" - + "\u017b\3\2\2\2\u017e\u0181\3\2\2\2\u017f\u017d\3\2\2\2\u017f\u0180\3\2" - + "\2\2\u0180\u0183\3\2\2\2\u0181\u017f\3\2\2\2\u0182\u0165\3\2\2\2\u0182" - + "\u016c\3\2\2\2\u0182\u0173\3\2\2\2\u0183/\3\2\2\2\u0184\u0188\5\62\32" - + "\2\u0185\u0187\5\64\33\2\u0186\u0185\3\2\2\2\u0187\u018a\3\2\2\2\u0188" - + "\u0186\3\2\2\2\u0188\u0189\3\2\2\2\u0189\u018d\3\2\2\2\u018a\u0188\3\2" - + "\2\2\u018b\u018d\5> \2\u018c\u0184\3\2\2\2\u018c\u018b\3\2\2\2\u018d\61" - + "\3\2\2\2\u018e\u018f\7\t\2\2\u018f\u0190\5$\23\2\u0190\u0191\7\n\2\2\u0191" - + "\u01a2\3\2\2\2\u0192\u01a2\t\r\2\2\u0193\u01a2\7Q\2\2\u0194\u01a2\7R\2" - + "\2\u0195\u01a2\7S\2\2\u0196\u01a2\7O\2\2\u0197\u01a2\7P\2\2\u0198\u01a2" - + "\5@!\2\u0199\u01a2\5B\"\2\u019a\u01a2\7V\2\2\u019b\u019c\t\16\2\2\u019c" - + "\u01a2\5F$\2\u019d\u019e\7\31\2\2\u019e\u019f\5\34\17\2\u019f\u01a0\5" - + "F$\2\u01a0\u01a2\3\2\2\2\u01a1\u018e\3\2\2\2\u01a1\u0192\3\2\2\2\u01a1" - + "\u0193\3\2\2\2\u01a1\u0194\3\2\2\2\u01a1\u0195\3\2\2\2\u01a1\u0196\3\2" - + "\2\2\u01a1\u0197\3\2\2\2\u01a1\u0198\3\2\2\2\u01a1\u0199\3\2\2\2\u01a1" - + "\u019a\3\2\2\2\u01a1\u019b\3\2\2\2\u01a1\u019d\3\2\2\2\u01a2\63\3\2\2" - + "\2\u01a3\u01a7\58\35\2\u01a4\u01a7\5:\36\2\u01a5\u01a7\5<\37\2\u01a6\u01a3" - + "\3\2\2\2\u01a6\u01a4\3\2\2\2\u01a6\u01a5\3\2\2\2\u01a7\65\3\2\2\2\u01a8" - + "\u01ab\58\35\2\u01a9\u01ab\5:\36\2\u01aa\u01a8\3\2\2\2\u01aa\u01a9\3\2" - + "\2\2\u01ab\67\3\2\2\2\u01ac\u01ad\t\17\2\2\u01ad\u01ae\7X\2\2\u01ae\u01af" - + "\5F$\2\u01af9\3\2\2\2\u01b0\u01b1\t\17\2\2\u01b1\u01b2\t\20\2\2\u01b2" - + ";\3\2\2\2\u01b3\u01b4\7\7\2\2\u01b4\u01b5\5$\23\2\u01b5\u01b6\7\b\2\2" - + "\u01b6=\3\2\2\2\u01b7\u01b8\7\31\2\2\u01b8\u01bd\5\34\17\2\u01b9\u01ba" - + "\7\7\2\2\u01ba\u01bb\5$\23\2\u01bb\u01bc\7\b\2\2\u01bc\u01be\3\2\2\2\u01bd" - + "\u01b9\3\2\2\2\u01be\u01bf\3\2\2\2\u01bf\u01bd\3\2\2\2\u01bf\u01c0\3\2" - + "\2\2\u01c0\u01c8\3\2\2\2\u01c1\u01c5\5\66\34\2\u01c2\u01c4\5\64\33\2\u01c3" - + "\u01c2\3\2\2\2\u01c4\u01c7\3\2\2\2\u01c5\u01c3\3\2\2\2\u01c5\u01c6\3\2" - + "\2\2\u01c6\u01c9\3\2\2\2\u01c7\u01c5\3\2\2\2\u01c8\u01c1\3\2\2\2\u01c8" - + "\u01c9\3\2\2\2\u01c9\u01e1\3\2\2\2\u01ca\u01cb\7\31\2\2\u01cb\u01cc\5" - + "\34\17\2\u01cc\u01cd\7\7\2\2\u01cd\u01ce\7\b\2\2\u01ce\u01d7\7\5\2\2\u01cf" - + "\u01d4\5$\23\2\u01d0\u01d1\7\16\2\2\u01d1\u01d3\5$\23\2\u01d2\u01d0\3" - + "\2\2\2\u01d3\u01d6\3\2\2\2\u01d4\u01d2\3\2\2\2\u01d4\u01d5\3\2\2\2\u01d5" - + "\u01d8\3\2\2\2\u01d6\u01d4\3\2\2\2\u01d7\u01cf\3\2\2\2\u01d7\u01d8\3\2" - + "\2\2\u01d8\u01d9\3\2\2\2\u01d9\u01dd\7\6\2\2\u01da\u01dc\5\64\33\2\u01db" - + "\u01da\3\2\2\2\u01dc\u01df\3\2\2\2\u01dd\u01db\3\2\2\2\u01dd\u01de\3\2" - + "\2\2\u01de\u01e1\3\2\2\2\u01df\u01dd\3\2\2\2\u01e0\u01b7\3\2\2\2\u01e0" - + "\u01ca\3\2\2\2\u01e1?\3\2\2\2\u01e2\u01e3\7\7\2\2\u01e3\u01e8\5$\23\2" - + "\u01e4\u01e5\7\16\2\2\u01e5\u01e7\5$\23\2\u01e6\u01e4\3\2\2\2\u01e7\u01ea" - + "\3\2\2\2\u01e8\u01e6\3\2\2\2\u01e8\u01e9\3\2\2\2\u01e9\u01eb\3\2\2\2\u01ea" - + 
"\u01e8\3\2\2\2\u01eb\u01ec\7\b\2\2\u01ec\u01f0\3\2\2\2\u01ed\u01ee\7\7" - + "\2\2\u01ee\u01f0\7\b\2\2\u01ef\u01e2\3\2\2\2\u01ef\u01ed\3\2\2\2\u01f0" - + "A\3\2\2\2\u01f1\u01f2\7\7\2\2\u01f2\u01f7\5D#\2\u01f3\u01f4\7\16\2\2\u01f4" - + "\u01f6\5D#\2\u01f5\u01f3\3\2\2\2\u01f6\u01f9\3\2\2\2\u01f7\u01f5\3\2\2" - + "\2\u01f7\u01f8\3\2\2\2\u01f8\u01fa\3\2\2\2\u01f9\u01f7\3\2\2\2\u01fa\u01fb" - + "\7\b\2\2\u01fb\u0200\3\2\2\2\u01fc\u01fd\7\7\2\2\u01fd\u01fe\7\67\2\2" - + "\u01fe\u0200\7\b\2\2\u01ff\u01f1\3\2\2\2\u01ff\u01fc\3\2\2\2\u0200C\3" - + "\2\2\2\u0201\u0202\5$\23\2\u0202\u0203\7\67\2\2\u0203\u0204\5$\23\2\u0204" - + "E\3\2\2\2\u0205\u020e\7\t\2\2\u0206\u020b\5H%\2\u0207\u0208\7\16\2\2\u0208" - + "\u020a\5H%\2\u0209\u0207\3\2\2\2\u020a\u020d\3\2\2\2\u020b\u0209\3\2\2" - + "\2\u020b\u020c\3\2\2\2\u020c\u020f\3\2\2\2\u020d\u020b\3\2\2\2\u020e\u0206" - + "\3\2\2\2\u020e\u020f\3\2\2\2\u020f\u0210\3\2\2\2\u0210\u0211\7\n\2\2\u0211" - + "G\3\2\2\2\u0212\u0216\5$\23\2\u0213\u0216\5J&\2\u0214\u0216\5N(\2\u0215" - + "\u0212\3\2\2\2\u0215\u0213\3\2\2\2\u0215\u0214\3\2\2\2\u0216I\3\2\2\2" - + "\u0217\u0225\5L\'\2\u0218\u0221\7\t\2\2\u0219\u021e\5L\'\2\u021a\u021b" - + "\7\16\2\2\u021b\u021d\5L\'\2\u021c\u021a\3\2\2\2\u021d\u0220\3\2\2\2\u021e" - + "\u021c\3\2\2\2\u021e\u021f\3\2\2\2\u021f\u0222\3\2\2\2\u0220\u021e\3\2" - + "\2\2\u0221\u0219\3\2\2\2\u0221\u0222\3\2\2\2\u0222\u0223\3\2\2\2\u0223" - + "\u0225\7\n\2\2\u0224\u0217\3\2\2\2\u0224\u0218\3\2\2\2\u0225\u0226\3\2" - + "\2\2\u0226\u0229\7:\2\2\u0227\u022a\5\20\t\2\u0228\u022a\5$\23\2\u0229" - + "\u0227\3\2\2\2\u0229\u0228\3\2\2\2\u022aK\3\2\2\2\u022b\u022d\5\32\16" - + "\2\u022c\u022b\3\2\2\2\u022c\u022d\3\2\2\2\u022d\u022e\3\2\2\2\u022e\u022f" - + "\7V\2\2\u022fM\3\2\2\2\u0230\u0231\5\32\16\2\u0231\u0232\79\2\2\u0232" - + "\u0233\7V\2\2\u0233\u023c\3\2\2\2\u0234\u0235\5\32\16\2\u0235\u0236\7" - + "9\2\2\u0236\u0237\7\31\2\2\u0237\u023c\3\2\2\2\u0238\u0239\7\35\2\2\u0239" - + "\u023a\79\2\2\u023a\u023c\7V\2\2\u023b\u0230\3\2\2\2\u023b\u0234\3\2\2" - + "\2\u023b\u0238\3\2\2\2\u023cO\3\2\2\2>SYlow\u0081\u0089\u008e\u0092\u0096" - + "\u009b\u00b3\u00b5\u00c3\u00c8\u00cc\u00d2\u00d6\u00de\u00e8\u00f0\u00fa" - + "\u00fd\u0102\u0135\u0137\u0145\u014c\u0155\u0161\u016a\u0171\u0178\u017f" - + "\u0182\u0188\u018c\u01a1\u01a6\u01aa\u01bf\u01c5\u01c8\u01d4\u01d7\u01dd" - + "\u01e0\u01e8\u01ef\u01f7\u01ff\u020b\u020e\u0215\u021e\u0221\u0224\u0229" - + "\u022c\u023b"; + public static final String _serializedATN = "\u0004\u0001V\u023c\u0002\u0000\u0007\u0000\u0002\u0001\u0007\u0001\u0002" + + "\u0002\u0007\u0002\u0002\u0003\u0007\u0003\u0002\u0004\u0007\u0004\u0002" + + "\u0005\u0007\u0005\u0002\u0006\u0007\u0006\u0002\u0007\u0007\u0007\u0002" + + "\b\u0007\b\u0002\t\u0007\t\u0002\n\u0007\n\u0002\u000b\u0007\u000b\u0002" + + "\f\u0007\f\u0002\r\u0007\r\u0002\u000e\u0007\u000e\u0002\u000f\u0007\u000f" + + "\u0002\u0010\u0007\u0010\u0002\u0011\u0007\u0011\u0002\u0012\u0007\u0012" + + "\u0002\u0013\u0007\u0013\u0002\u0014\u0007\u0014\u0002\u0015\u0007\u0015" + + "\u0002\u0016\u0007\u0016\u0002\u0017\u0007\u0017\u0002\u0018\u0007\u0018" + + "\u0002\u0019\u0007\u0019\u0002\u001a\u0007\u001a\u0002\u001b\u0007\u001b" + + "\u0002\u001c\u0007\u001c\u0002\u001d\u0007\u001d\u0002\u001e\u0007\u001e" + + "\u0002\u001f\u0007\u001f\u0002 \u0007 \u0002!\u0007!\u0002\"\u0007\"\u0002" + + "#\u0007#\u0002$\u0007$\u0002%\u0007%\u0002&\u0007&\u0001\u0000\u0005\u0000" + + "P\b\u0000\n\u0000\f\u0000S\t\u0000\u0001\u0000\u0005\u0000V\b\u0000\n" + + 
"\u0000\f\u0000Y\t\u0000\u0001\u0000\u0001\u0000\u0001\u0001\u0001\u0001" + + "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0002\u0001\u0002\u0001\u0002" + + "\u0001\u0002\u0001\u0002\u0001\u0002\u0001\u0002\u0005\u0002i\b\u0002" + + "\n\u0002\f\u0002l\t\u0002\u0003\u0002n\b\u0002\u0001\u0002\u0001\u0002" + + "\u0001\u0003\u0001\u0003\u0001\u0003\u0001\u0003\u0003\u0003v\b\u0003" + + "\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004" + + "\u0001\u0004\u0001\u0004\u0003\u0004\u0080\b\u0004\u0001\u0004\u0001\u0004" + + "\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0003\u0004\u0088\b\u0004" + + "\u0001\u0004\u0001\u0004\u0001\u0004\u0003\u0004\u008d\b\u0004\u0001\u0004" + + "\u0001\u0004\u0003\u0004\u0091\b\u0004\u0001\u0004\u0001\u0004\u0003\u0004" + + "\u0095\b\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0003\u0004\u009a\b" + + "\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001" + + "\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001" + + "\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001\u0004\u0001" + + "\u0004\u0001\u0004\u0001\u0004\u0004\u0004\u00b0\b\u0004\u000b\u0004\f" + + "\u0004\u00b1\u0003\u0004\u00b4\b\u0004\u0001\u0005\u0001\u0005\u0001\u0005" + + "\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005\u0001\u0005" + + "\u0001\u0005\u0001\u0005\u0001\u0005\u0003\u0005\u00c2\b\u0005\u0001\u0005" + + "\u0001\u0005\u0001\u0005\u0003\u0005\u00c7\b\u0005\u0001\u0006\u0001\u0006" + + "\u0003\u0006\u00cb\b\u0006\u0001\u0007\u0001\u0007\u0005\u0007\u00cf\b" + + "\u0007\n\u0007\f\u0007\u00d2\t\u0007\u0001\u0007\u0003\u0007\u00d5\b\u0007" + + "\u0001\u0007\u0001\u0007\u0001\b\u0001\b\u0001\t\u0001\t\u0003\t\u00dd" + + "\b\t\u0001\n\u0001\n\u0001\u000b\u0001\u000b\u0001\u000b\u0001\u000b\u0005" + + "\u000b\u00e5\b\u000b\n\u000b\f\u000b\u00e8\t\u000b\u0001\f\u0001\f\u0001" + + "\f\u0005\f\u00ed\b\f\n\f\f\f\u00f0\t\f\u0001\r\u0001\r\u0001\r\u0001\r" + + "\u0001\r\u0005\r\u00f7\b\r\n\r\f\r\u00fa\t\r\u0003\r\u00fc\b\r\u0001\u000e" + + "\u0001\u000e\u0001\u000e\u0003\u000e\u0101\b\u000e\u0001\u000f\u0001\u000f" + + "\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u0010" + + "\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010" + + "\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010" + + "\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010" + + "\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010" + + "\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010" + + "\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010" + + "\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0010\u0005\u0010" + + "\u0134\b\u0010\n\u0010\f\u0010\u0137\t\u0010\u0001\u0011\u0001\u0011\u0001" + + "\u0011\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0011\u0001" + + "\u0011\u0001\u0011\u0001\u0011\u0003\u0011\u0144\b\u0011\u0001\u0012\u0001" + + "\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0003\u0012\u014b\b\u0012\u0001" + + "\u0013\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0013\u0001" + + "\u0013\u0003\u0013\u0154\b\u0013\u0001\u0014\u0001\u0014\u0001\u0014\u0001" + + "\u0014\u0001\u0014\u0001\u0014\u0001\u0014\u0001\u0014\u0001\u0014\u0001" + + "\u0014\u0003\u0014\u0160\b\u0014\u0001\u0015\u0001\u0015\u0001\u0016\u0001" + + "\u0016\u0001\u0016\u0004\u0016\u0167\b\u0016\u000b\u0016\f\u0016\u0168" + + 
"\u0001\u0016\u0001\u0016\u0001\u0016\u0004\u0016\u016e\b\u0016\u000b\u0016" + + "\f\u0016\u016f\u0001\u0016\u0001\u0016\u0001\u0016\u0005\u0016\u0175\b" + + "\u0016\n\u0016\f\u0016\u0178\t\u0016\u0001\u0016\u0001\u0016\u0005\u0016" + + "\u017c\b\u0016\n\u0016\f\u0016\u017f\t\u0016\u0003\u0016\u0181\b\u0016" + + "\u0001\u0017\u0001\u0017\u0005\u0017\u0185\b\u0017\n\u0017\f\u0017\u0188" + + "\t\u0017\u0001\u0017\u0003\u0017\u018b\b\u0017\u0001\u0018\u0001\u0018" + + "\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018" + + "\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018" + + "\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0003\u0018" + + "\u01a0\b\u0018\u0001\u0019\u0001\u0019\u0001\u0019\u0003\u0019\u01a5\b" + + "\u0019\u0001\u001a\u0001\u001a\u0003\u001a\u01a9\b\u001a\u0001\u001b\u0001" + + "\u001b\u0001\u001b\u0001\u001b\u0001\u001c\u0001\u001c\u0001\u001c\u0001" + + "\u001d\u0001\u001d\u0001\u001d\u0001\u001d\u0001\u001e\u0001\u001e\u0001" + + "\u001e\u0001\u001e\u0001\u001e\u0001\u001e\u0004\u001e\u01bc\b\u001e\u000b" + + "\u001e\f\u001e\u01bd\u0001\u001e\u0001\u001e\u0005\u001e\u01c2\b\u001e" + + "\n\u001e\f\u001e\u01c5\t\u001e\u0003\u001e\u01c7\b\u001e\u0001\u001e\u0001" + + "\u001e\u0001\u001e\u0001\u001e\u0001\u001e\u0001\u001e\u0001\u001e\u0001" + + "\u001e\u0005\u001e\u01d1\b\u001e\n\u001e\f\u001e\u01d4\t\u001e\u0003\u001e" + + "\u01d6\b\u001e\u0001\u001e\u0001\u001e\u0005\u001e\u01da\b\u001e\n\u001e" + + "\f\u001e\u01dd\t\u001e\u0003\u001e\u01df\b\u001e\u0001\u001f\u0001\u001f" + + "\u0001\u001f\u0001\u001f\u0005\u001f\u01e5\b\u001f\n\u001f\f\u001f\u01e8" + + "\t\u001f\u0001\u001f\u0001\u001f\u0001\u001f\u0001\u001f\u0003\u001f\u01ee" + + "\b\u001f\u0001 \u0001 \u0001 \u0001 \u0005 \u01f4\b \n \f \u01f7\t \u0001" + + " \u0001 \u0001 \u0001 \u0001 \u0003 \u01fe\b \u0001!\u0001!\u0001!\u0001" + + "!\u0001\"\u0001\"\u0001\"\u0001\"\u0005\"\u0208\b\"\n\"\f\"\u020b\t\"" + + "\u0003\"\u020d\b\"\u0001\"\u0001\"\u0001#\u0001#\u0001#\u0003#\u0214\b" + + "#\u0001$\u0001$\u0001$\u0001$\u0001$\u0005$\u021b\b$\n$\f$\u021e\t$\u0003" + + "$\u0220\b$\u0001$\u0003$\u0223\b$\u0001$\u0001$\u0001$\u0003$\u0228\b" + + "$\u0001%\u0003%\u022b\b%\u0001%\u0001%\u0001&\u0001&\u0001&\u0001&\u0001" + + "&\u0001&\u0001&\u0001&\u0001&\u0001&\u0001&\u0003&\u023a\b&\u0001&\u0000" + + "\u0001 \'\u0000\u0002\u0004\u0006\b\n\f\u000e\u0010\u0012\u0014\u0016" + + "\u0018\u001a\u001c\u001e \"$&(*,.02468:<>@BDFHJL\u0000\u000f\u0001\u0001" + + "\r\r\u0001\u0000\u001f!\u0001\u0000\"#\u0001\u00009:\u0001\u0000$&\u0001" + + "\u0000\'*\u0001\u0000+.\u0001\u0000=H\u0001\u0000;<\u0001\u0000\u001d" + + "\u001e\u0001\u0000RS\u0001\u0000IL\u0002\u0000\t\tTT\u0001\u0000\n\u000b" + + "\u0001\u0000UV\u0277\u0000Q\u0001\u0000\u0000\u0000\u0002\\\u0001\u0000" + + "\u0000\u0000\u0004a\u0001\u0000\u0000\u0000\u0006u\u0001\u0000\u0000\u0000" + + "\b\u00b3\u0001\u0000\u0000\u0000\n\u00c6\u0001\u0000\u0000\u0000\f\u00ca" + + "\u0001\u0000\u0000\u0000\u000e\u00cc\u0001\u0000\u0000\u0000\u0010\u00d8" + + "\u0001\u0000\u0000\u0000\u0012\u00dc\u0001\u0000\u0000\u0000\u0014\u00de" + + "\u0001\u0000\u0000\u0000\u0016\u00e0\u0001\u0000\u0000\u0000\u0018\u00e9" + + "\u0001\u0000\u0000\u0000\u001a\u00fb\u0001\u0000\u0000\u0000\u001c\u00fd" + + "\u0001\u0000\u0000\u0000\u001e\u0102\u0001\u0000\u0000\u0000 \u0109\u0001" + + "\u0000\u0000\u0000\"\u0143\u0001\u0000\u0000\u0000$\u014a\u0001\u0000" + + "\u0000\u0000&\u0153\u0001\u0000\u0000\u0000(\u015f\u0001\u0000\u0000\u0000" + + 
"*\u0161\u0001\u0000\u0000\u0000,\u0180\u0001\u0000\u0000\u0000.\u018a" + + "\u0001\u0000\u0000\u00000\u019f\u0001\u0000\u0000\u00002\u01a4\u0001\u0000" + + "\u0000\u00004\u01a8\u0001\u0000\u0000\u00006\u01aa\u0001\u0000\u0000\u0000" + + "8\u01ae\u0001\u0000\u0000\u0000:\u01b1\u0001\u0000\u0000\u0000<\u01de" + + "\u0001\u0000\u0000\u0000>\u01ed\u0001\u0000\u0000\u0000@\u01fd\u0001\u0000" + + "\u0000\u0000B\u01ff\u0001\u0000\u0000\u0000D\u0203\u0001\u0000\u0000\u0000" + + "F\u0213\u0001\u0000\u0000\u0000H\u0222\u0001\u0000\u0000\u0000J\u022a" + + "\u0001\u0000\u0000\u0000L\u0239\u0001\u0000\u0000\u0000NP\u0003\u0002" + + "\u0001\u0000ON\u0001\u0000\u0000\u0000PS\u0001\u0000\u0000\u0000QO\u0001" + + "\u0000\u0000\u0000QR\u0001\u0000\u0000\u0000RW\u0001\u0000\u0000\u0000" + + "SQ\u0001\u0000\u0000\u0000TV\u0003\u0006\u0003\u0000UT\u0001\u0000\u0000" + + "\u0000VY\u0001\u0000\u0000\u0000WU\u0001\u0000\u0000\u0000WX\u0001\u0000" + + "\u0000\u0000XZ\u0001\u0000\u0000\u0000YW\u0001\u0000\u0000\u0000Z[\u0005" + + "\u0000\u0000\u0001[\u0001\u0001\u0000\u0000\u0000\\]\u0003\u0018\f\u0000" + + "]^\u0005T\u0000\u0000^_\u0003\u0004\u0002\u0000_`\u0003\u000e\u0007\u0000" + + "`\u0003\u0001\u0000\u0000\u0000am\u0005\u0007\u0000\u0000bc\u0003\u0018" + + "\f\u0000cj\u0005T\u0000\u0000de\u0005\f\u0000\u0000ef\u0003\u0018\f\u0000" + + "fg\u0005T\u0000\u0000gi\u0001\u0000\u0000\u0000hd\u0001\u0000\u0000\u0000" + + "il\u0001\u0000\u0000\u0000jh\u0001\u0000\u0000\u0000jk\u0001\u0000\u0000" + + "\u0000kn\u0001\u0000\u0000\u0000lj\u0001\u0000\u0000\u0000mb\u0001\u0000" + + "\u0000\u0000mn\u0001\u0000\u0000\u0000no\u0001\u0000\u0000\u0000op\u0005" + + "\b\u0000\u0000p\u0005\u0001\u0000\u0000\u0000qv\u0003\b\u0004\u0000rs" + + "\u0003\n\u0005\u0000st\u0007\u0000\u0000\u0000tv\u0001\u0000\u0000\u0000" + + "uq\u0001\u0000\u0000\u0000ur\u0001\u0000\u0000\u0000v\u0007\u0001\u0000" + + "\u0000\u0000wx\u0005\u000e\u0000\u0000xy\u0005\u0007\u0000\u0000yz\u0003" + + "\"\u0011\u0000z{\u0005\b\u0000\u0000{\u007f\u0003\f\u0006\u0000|}\u0005" + + "\u0010\u0000\u0000}\u0080\u0003\f\u0006\u0000~\u0080\u0004\u0004\u0000" + + "\u0000\u007f|\u0001\u0000\u0000\u0000\u007f~\u0001\u0000\u0000\u0000\u0080" + + "\u00b4\u0001\u0000\u0000\u0000\u0081\u0082\u0005\u0011\u0000\u0000\u0082" + + "\u0083\u0005\u0007\u0000\u0000\u0083\u0084\u0003\"\u0011\u0000\u0084\u0087" + + "\u0005\b\u0000\u0000\u0085\u0088\u0003\f\u0006\u0000\u0086\u0088\u0003" + + "\u0010\b\u0000\u0087\u0085\u0001\u0000\u0000\u0000\u0087\u0086\u0001\u0000" + + "\u0000\u0000\u0088\u00b4\u0001\u0000\u0000\u0000\u0089\u008a\u0005\u0013" + + "\u0000\u0000\u008a\u008c\u0005\u0007\u0000\u0000\u008b\u008d\u0003\u0012" + + "\t\u0000\u008c\u008b\u0001\u0000\u0000\u0000\u008c\u008d\u0001\u0000\u0000" + + "\u0000\u008d\u008e\u0001\u0000\u0000\u0000\u008e\u0090\u0005\r\u0000\u0000" + + "\u008f\u0091\u0003\"\u0011\u0000\u0090\u008f\u0001\u0000\u0000\u0000\u0090" + + "\u0091\u0001\u0000\u0000\u0000\u0091\u0092\u0001\u0000\u0000\u0000\u0092" + + "\u0094\u0005\r\u0000\u0000\u0093\u0095\u0003\u0014\n\u0000\u0094\u0093" + + "\u0001\u0000\u0000\u0000\u0094\u0095\u0001\u0000\u0000\u0000\u0095\u0096" + + "\u0001\u0000\u0000\u0000\u0096\u0099\u0005\b\u0000\u0000\u0097\u009a\u0003" + + "\f\u0006\u0000\u0098\u009a\u0003\u0010\b\u0000\u0099\u0097\u0001\u0000" + + "\u0000\u0000\u0099\u0098\u0001\u0000\u0000\u0000\u009a\u00b4\u0001\u0000" + + "\u0000\u0000\u009b\u009c\u0005\u0013\u0000\u0000\u009c\u009d\u0005\u0007" + + "\u0000\u0000\u009d\u009e\u0003\u0018\f\u0000\u009e\u009f\u0005T\u0000" + + 
"\u0000\u009f\u00a0\u00055\u0000\u0000\u00a0\u00a1\u0003\"\u0011\u0000" + + "\u00a1\u00a2\u0005\b\u0000\u0000\u00a2\u00a3\u0003\f\u0006\u0000\u00a3" + + "\u00b4\u0001\u0000\u0000\u0000\u00a4\u00a5\u0005\u0013\u0000\u0000\u00a5" + + "\u00a6\u0005\u0007\u0000\u0000\u00a6\u00a7\u0005T\u0000\u0000\u00a7\u00a8" + + "\u0005\u000f\u0000\u0000\u00a8\u00a9\u0003\"\u0011\u0000\u00a9\u00aa\u0005" + + "\b\u0000\u0000\u00aa\u00ab\u0003\f\u0006\u0000\u00ab\u00b4\u0001\u0000" + + "\u0000\u0000\u00ac\u00ad\u0005\u0018\u0000\u0000\u00ad\u00af\u0003\u000e" + + "\u0007\u0000\u00ae\u00b0\u0003\u001e\u000f\u0000\u00af\u00ae\u0001\u0000" + + "\u0000\u0000\u00b0\u00b1\u0001\u0000\u0000\u0000\u00b1\u00af\u0001\u0000" + + "\u0000\u0000\u00b1\u00b2\u0001\u0000\u0000\u0000\u00b2\u00b4\u0001\u0000" + + "\u0000\u0000\u00b3w\u0001\u0000\u0000\u0000\u00b3\u0081\u0001\u0000\u0000" + + "\u0000\u00b3\u0089\u0001\u0000\u0000\u0000\u00b3\u009b\u0001\u0000\u0000" + + "\u0000\u00b3\u00a4\u0001\u0000\u0000\u0000\u00b3\u00ac\u0001\u0000\u0000" + + "\u0000\u00b4\t\u0001\u0000\u0000\u0000\u00b5\u00b6\u0005\u0012\u0000\u0000" + + "\u00b6\u00b7\u0003\u000e\u0007\u0000\u00b7\u00b8\u0005\u0011\u0000\u0000" + + "\u00b8\u00b9\u0005\u0007\u0000\u0000\u00b9\u00ba\u0003\"\u0011\u0000\u00ba" + + "\u00bb\u0005\b\u0000\u0000\u00bb\u00c7\u0001\u0000\u0000\u0000\u00bc\u00c7" + + "\u0003\u0016\u000b\u0000\u00bd\u00c7\u0005\u0014\u0000\u0000\u00be\u00c7" + + "\u0005\u0015\u0000\u0000\u00bf\u00c1\u0005\u0016\u0000\u0000\u00c0\u00c2" + + "\u0003\"\u0011\u0000\u00c1\u00c0\u0001\u0000\u0000\u0000\u00c1\u00c2\u0001" + + "\u0000\u0000\u0000\u00c2\u00c7\u0001\u0000\u0000\u0000\u00c3\u00c4\u0005" + + "\u001a\u0000\u0000\u00c4\u00c7\u0003\"\u0011\u0000\u00c5\u00c7\u0003\"" + + "\u0011\u0000\u00c6\u00b5\u0001\u0000\u0000\u0000\u00c6\u00bc\u0001\u0000" + + "\u0000\u0000\u00c6\u00bd\u0001\u0000\u0000\u0000\u00c6\u00be\u0001\u0000" + + "\u0000\u0000\u00c6\u00bf\u0001\u0000\u0000\u0000\u00c6\u00c3\u0001\u0000" + + "\u0000\u0000\u00c6\u00c5\u0001\u0000\u0000\u0000\u00c7\u000b\u0001\u0000" + + "\u0000\u0000\u00c8\u00cb\u0003\u000e\u0007\u0000\u00c9\u00cb\u0003\u0006" + + "\u0003\u0000\u00ca\u00c8\u0001\u0000\u0000\u0000\u00ca\u00c9\u0001\u0000" + + "\u0000\u0000\u00cb\r\u0001\u0000\u0000\u0000\u00cc\u00d0\u0005\u0003\u0000" + + "\u0000\u00cd\u00cf\u0003\u0006\u0003\u0000\u00ce\u00cd\u0001\u0000\u0000" + + "\u0000\u00cf\u00d2\u0001\u0000\u0000\u0000\u00d0\u00ce\u0001\u0000\u0000" + + "\u0000\u00d0\u00d1\u0001\u0000\u0000\u0000\u00d1\u00d4\u0001\u0000\u0000" + + "\u0000\u00d2\u00d0\u0001\u0000\u0000\u0000\u00d3\u00d5\u0003\n\u0005\u0000" + + "\u00d4\u00d3\u0001\u0000\u0000\u0000\u00d4\u00d5\u0001\u0000\u0000\u0000" + + "\u00d5\u00d6\u0001\u0000\u0000\u0000\u00d6\u00d7\u0005\u0004\u0000\u0000" + + "\u00d7\u000f\u0001\u0000\u0000\u0000\u00d8\u00d9\u0005\r\u0000\u0000\u00d9" + + "\u0011\u0001\u0000\u0000\u0000\u00da\u00dd\u0003\u0016\u000b\u0000\u00db" + + "\u00dd\u0003\"\u0011\u0000\u00dc\u00da\u0001\u0000\u0000\u0000\u00dc\u00db" + + "\u0001\u0000\u0000\u0000\u00dd\u0013\u0001\u0000\u0000\u0000\u00de\u00df" + + "\u0003\"\u0011\u0000\u00df\u0015\u0001\u0000\u0000\u0000\u00e0\u00e1\u0003" + + "\u0018\f\u0000\u00e1\u00e6\u0003\u001c\u000e\u0000\u00e2\u00e3\u0005\f" + + "\u0000\u0000\u00e3\u00e5\u0003\u001c\u000e\u0000\u00e4\u00e2\u0001\u0000" + + "\u0000\u0000\u00e5\u00e8\u0001\u0000\u0000\u0000\u00e6\u00e4\u0001\u0000" + + "\u0000\u0000\u00e6\u00e7\u0001\u0000\u0000\u0000\u00e7\u0017\u0001\u0000" + + 
"\u0000\u0000\u00e8\u00e6\u0001\u0000\u0000\u0000\u00e9\u00ee\u0003\u001a" + + "\r\u0000\u00ea\u00eb\u0005\u0005\u0000\u0000\u00eb\u00ed\u0005\u0006\u0000" + + "\u0000\u00ec\u00ea\u0001\u0000\u0000\u0000\u00ed\u00f0\u0001\u0000\u0000" + + "\u0000\u00ee\u00ec\u0001\u0000\u0000\u0000\u00ee\u00ef\u0001\u0000\u0000" + + "\u0000\u00ef\u0019\u0001\u0000\u0000\u0000\u00f0\u00ee\u0001\u0000\u0000" + + "\u0000\u00f1\u00fc\u0005S\u0000\u0000\u00f2\u00fc\u0005R\u0000\u0000\u00f3" + + "\u00f8\u0005T\u0000\u0000\u00f4\u00f5\u0005\n\u0000\u0000\u00f5\u00f7" + + "\u0005V\u0000\u0000\u00f6\u00f4\u0001\u0000\u0000\u0000\u00f7\u00fa\u0001" + + "\u0000\u0000\u0000\u00f8\u00f6\u0001\u0000\u0000\u0000\u00f8\u00f9\u0001" + + "\u0000\u0000\u0000\u00f9\u00fc\u0001\u0000\u0000\u0000\u00fa\u00f8\u0001" + + "\u0000\u0000\u0000\u00fb\u00f1\u0001\u0000\u0000\u0000\u00fb\u00f2\u0001" + + "\u0000\u0000\u0000\u00fb\u00f3\u0001\u0000\u0000\u0000\u00fc\u001b\u0001" + + "\u0000\u0000\u0000\u00fd\u0100\u0005T\u0000\u0000\u00fe\u00ff\u0005=\u0000" + + "\u0000\u00ff\u0101\u0003\"\u0011\u0000\u0100\u00fe\u0001\u0000\u0000\u0000" + + "\u0100\u0101\u0001\u0000\u0000\u0000\u0101\u001d\u0001\u0000\u0000\u0000" + + "\u0102\u0103\u0005\u0019\u0000\u0000\u0103\u0104\u0005\u0007\u0000\u0000" + + "\u0104\u0105\u0003\u001a\r\u0000\u0105\u0106\u0005T\u0000\u0000\u0106" + + "\u0107\u0005\b\u0000\u0000\u0107\u0108\u0003\u000e\u0007\u0000\u0108\u001f" + + "\u0001\u0000\u0000\u0000\u0109\u010a\u0006\u0010\uffff\uffff\u0000\u010a" + + "\u010b\u0003$\u0012\u0000\u010b\u0135\u0001\u0000\u0000\u0000\u010c\u010d" + + "\n\r\u0000\u0000\u010d\u010e\u0007\u0001\u0000\u0000\u010e\u0134\u0003" + + " \u0010\u000e\u010f\u0110\n\f\u0000\u0000\u0110\u0111\u0007\u0002\u0000" + + "\u0000\u0111\u0134\u0003 \u0010\r\u0112\u0113\n\u000b\u0000\u0000\u0113" + + "\u0114\u0007\u0003\u0000\u0000\u0114\u0134\u0003 \u0010\f\u0115\u0116" + + "\n\n\u0000\u0000\u0116\u0117\u0007\u0004\u0000\u0000\u0117\u0134\u0003" + + " \u0010\u000b\u0118\u0119\n\t\u0000\u0000\u0119\u011a\u0007\u0005\u0000" + + "\u0000\u011a\u0134\u0003 \u0010\n\u011b\u011c\n\u0007\u0000\u0000\u011c" + + "\u011d\u0007\u0006\u0000\u0000\u011d\u0134\u0003 \u0010\b\u011e\u011f" + + "\n\u0006\u0000\u0000\u011f\u0120\u0005/\u0000\u0000\u0120\u0134\u0003" + + " \u0010\u0007\u0121\u0122\n\u0005\u0000\u0000\u0122\u0123\u00050\u0000" + + "\u0000\u0123\u0134\u0003 \u0010\u0006\u0124\u0125\n\u0004\u0000\u0000" + + "\u0125\u0126\u00051\u0000\u0000\u0126\u0134\u0003 \u0010\u0005\u0127\u0128" + + "\n\u0003\u0000\u0000\u0128\u0129\u00052\u0000\u0000\u0129\u0134\u0003" + + " \u0010\u0004\u012a\u012b\n\u0002\u0000\u0000\u012b\u012c\u00053\u0000" + + "\u0000\u012c\u0134\u0003 \u0010\u0003\u012d\u012e\n\u0001\u0000\u0000" + + "\u012e\u012f\u00056\u0000\u0000\u012f\u0134\u0003 \u0010\u0001\u0130\u0131" + + "\n\b\u0000\u0000\u0131\u0132\u0005\u001c\u0000\u0000\u0132\u0134\u0003" + + "\u0018\f\u0000\u0133\u010c\u0001\u0000\u0000\u0000\u0133\u010f\u0001\u0000" + + "\u0000\u0000\u0133\u0112\u0001\u0000\u0000\u0000\u0133\u0115\u0001\u0000" + + "\u0000\u0000\u0133\u0118\u0001\u0000\u0000\u0000\u0133\u011b\u0001\u0000" + + "\u0000\u0000\u0133\u011e\u0001\u0000\u0000\u0000\u0133\u0121\u0001\u0000" + + "\u0000\u0000\u0133\u0124\u0001\u0000\u0000\u0000\u0133\u0127\u0001\u0000" + + "\u0000\u0000\u0133\u012a\u0001\u0000\u0000\u0000\u0133\u012d\u0001\u0000" + + "\u0000\u0000\u0133\u0130\u0001\u0000\u0000\u0000\u0134\u0137\u0001\u0000" + + "\u0000\u0000\u0135\u0133\u0001\u0000\u0000\u0000\u0135\u0136\u0001\u0000" + + 
"\u0000\u0000\u0136!\u0001\u0000\u0000\u0000\u0137\u0135\u0001\u0000\u0000" + + "\u0000\u0138\u0144\u0003 \u0010\u0000\u0139\u013a\u0003 \u0010\u0000\u013a" + + "\u013b\u00054\u0000\u0000\u013b\u013c\u0003\"\u0011\u0000\u013c\u013d" + + "\u00055\u0000\u0000\u013d\u013e\u0003\"\u0011\u0000\u013e\u0144\u0001" + + "\u0000\u0000\u0000\u013f\u0140\u0003 \u0010\u0000\u0140\u0141\u0007\u0007" + + "\u0000\u0000\u0141\u0142\u0003\"\u0011\u0000\u0142\u0144\u0001\u0000\u0000" + + "\u0000\u0143\u0138\u0001\u0000\u0000\u0000\u0143\u0139\u0001\u0000\u0000" + + "\u0000\u0143\u013f\u0001\u0000\u0000\u0000\u0144#\u0001\u0000\u0000\u0000" + + "\u0145\u0146\u0007\b\u0000\u0000\u0146\u014b\u0003.\u0017\u0000\u0147" + + "\u0148\u0007\u0002\u0000\u0000\u0148\u014b\u0003$\u0012\u0000\u0149\u014b" + + "\u0003&\u0013\u0000\u014a\u0145\u0001\u0000\u0000\u0000\u014a\u0147\u0001" + + "\u0000\u0000\u0000\u014a\u0149\u0001\u0000\u0000\u0000\u014b%\u0001\u0000" + + "\u0000\u0000\u014c\u0154\u0003.\u0017\u0000\u014d\u014e\u0003.\u0017\u0000" + + "\u014e\u014f\u0007\b\u0000\u0000\u014f\u0154\u0001\u0000\u0000\u0000\u0150" + + "\u0151\u0007\t\u0000\u0000\u0151\u0154\u0003$\u0012\u0000\u0152\u0154" + + "\u0003(\u0014\u0000\u0153\u014c\u0001\u0000\u0000\u0000\u0153\u014d\u0001" + + "\u0000\u0000\u0000\u0153\u0150\u0001\u0000\u0000\u0000\u0153\u0152\u0001" + + "\u0000\u0000\u0000\u0154\'\u0001\u0000\u0000\u0000\u0155\u0156\u0005\u0007" + + "\u0000\u0000\u0156\u0157\u0003*\u0015\u0000\u0157\u0158\u0005\b\u0000" + + "\u0000\u0158\u0159\u0003$\u0012\u0000\u0159\u0160\u0001\u0000\u0000\u0000" + + "\u015a\u015b\u0005\u0007\u0000\u0000\u015b\u015c\u0003,\u0016\u0000\u015c" + + "\u015d\u0005\b\u0000\u0000\u015d\u015e\u0003&\u0013\u0000\u015e\u0160" + + "\u0001\u0000\u0000\u0000\u015f\u0155\u0001\u0000\u0000\u0000\u015f\u015a" + + "\u0001\u0000\u0000\u0000\u0160)\u0001\u0000\u0000\u0000\u0161\u0162\u0007" + + "\n\u0000\u0000\u0162+\u0001\u0000\u0000\u0000\u0163\u0166\u0005S\u0000" + + "\u0000\u0164\u0165\u0005\u0005\u0000\u0000\u0165\u0167\u0005\u0006\u0000" + + "\u0000\u0166\u0164\u0001\u0000\u0000\u0000\u0167\u0168\u0001\u0000\u0000" + + "\u0000\u0168\u0166\u0001\u0000\u0000\u0000\u0168\u0169\u0001\u0000\u0000" + + "\u0000\u0169\u0181\u0001\u0000\u0000\u0000\u016a\u016d\u0005R\u0000\u0000" + + "\u016b\u016c\u0005\u0005\u0000\u0000\u016c\u016e\u0005\u0006\u0000\u0000" + + "\u016d\u016b\u0001\u0000\u0000\u0000\u016e\u016f\u0001\u0000\u0000\u0000" + + "\u016f\u016d\u0001\u0000\u0000\u0000\u016f\u0170\u0001\u0000\u0000\u0000" + + "\u0170\u0181\u0001\u0000\u0000\u0000\u0171\u0176\u0005T\u0000\u0000\u0172" + + "\u0173\u0005\n\u0000\u0000\u0173\u0175\u0005V\u0000\u0000\u0174\u0172" + + "\u0001\u0000\u0000\u0000\u0175\u0178\u0001\u0000\u0000\u0000\u0176\u0174" + + "\u0001\u0000\u0000\u0000\u0176\u0177\u0001\u0000\u0000\u0000\u0177\u017d" + + "\u0001\u0000\u0000\u0000\u0178\u0176\u0001\u0000\u0000\u0000\u0179\u017a" + + "\u0005\u0005\u0000\u0000\u017a\u017c\u0005\u0006\u0000\u0000\u017b\u0179" + + "\u0001\u0000\u0000\u0000\u017c\u017f\u0001\u0000\u0000\u0000\u017d\u017b" + + "\u0001\u0000\u0000\u0000\u017d\u017e\u0001\u0000\u0000\u0000\u017e\u0181" + + "\u0001\u0000\u0000\u0000\u017f\u017d\u0001\u0000\u0000\u0000\u0180\u0163" + + "\u0001\u0000\u0000\u0000\u0180\u016a\u0001\u0000\u0000\u0000\u0180\u0171" + + "\u0001\u0000\u0000\u0000\u0181-\u0001\u0000\u0000\u0000\u0182\u0186\u0003" + + "0\u0018\u0000\u0183\u0185\u00032\u0019\u0000\u0184\u0183\u0001\u0000\u0000" + + "\u0000\u0185\u0188\u0001\u0000\u0000\u0000\u0186\u0184\u0001\u0000\u0000" + 
+ "\u0000\u0186\u0187\u0001\u0000\u0000\u0000\u0187\u018b\u0001\u0000\u0000" + + "\u0000\u0188\u0186\u0001\u0000\u0000\u0000\u0189\u018b\u0003<\u001e\u0000" + + "\u018a\u0182\u0001\u0000\u0000\u0000\u018a\u0189\u0001\u0000\u0000\u0000" + + "\u018b/\u0001\u0000\u0000\u0000\u018c\u018d\u0005\u0007\u0000\u0000\u018d" + + "\u018e\u0003\"\u0011\u0000\u018e\u018f\u0005\b\u0000\u0000\u018f\u01a0" + + "\u0001\u0000\u0000\u0000\u0190\u01a0\u0007\u000b\u0000\u0000\u0191\u01a0" + + "\u0005O\u0000\u0000\u0192\u01a0\u0005P\u0000\u0000\u0193\u01a0\u0005Q" + + "\u0000\u0000\u0194\u01a0\u0005M\u0000\u0000\u0195\u01a0\u0005N\u0000\u0000" + + "\u0196\u01a0\u0003>\u001f\u0000\u0197\u01a0\u0003@ \u0000\u0198\u01a0" + + "\u0005T\u0000\u0000\u0199\u019a\u0007\f\u0000\u0000\u019a\u01a0\u0003" + + "D\"\u0000\u019b\u019c\u0005\u0017\u0000\u0000\u019c\u019d\u0003\u001a" + + "\r\u0000\u019d\u019e\u0003D\"\u0000\u019e\u01a0\u0001\u0000\u0000\u0000" + + "\u019f\u018c\u0001\u0000\u0000\u0000\u019f\u0190\u0001\u0000\u0000\u0000" + + "\u019f\u0191\u0001\u0000\u0000\u0000\u019f\u0192\u0001\u0000\u0000\u0000" + + "\u019f\u0193\u0001\u0000\u0000\u0000\u019f\u0194\u0001\u0000\u0000\u0000" + + "\u019f\u0195\u0001\u0000\u0000\u0000\u019f\u0196\u0001\u0000\u0000\u0000" + + "\u019f\u0197\u0001\u0000\u0000\u0000\u019f\u0198\u0001\u0000\u0000\u0000" + + "\u019f\u0199\u0001\u0000\u0000\u0000\u019f\u019b\u0001\u0000\u0000\u0000" + + "\u01a01\u0001\u0000\u0000\u0000\u01a1\u01a5\u00036\u001b\u0000\u01a2\u01a5" + + "\u00038\u001c\u0000\u01a3\u01a5\u0003:\u001d\u0000\u01a4\u01a1\u0001\u0000" + + "\u0000\u0000\u01a4\u01a2\u0001\u0000\u0000\u0000\u01a4\u01a3\u0001\u0000" + + "\u0000\u0000\u01a53\u0001\u0000\u0000\u0000\u01a6\u01a9\u00036\u001b\u0000" + + "\u01a7\u01a9\u00038\u001c\u0000\u01a8\u01a6\u0001\u0000\u0000\u0000\u01a8" + + "\u01a7\u0001\u0000\u0000\u0000\u01a95\u0001\u0000\u0000\u0000\u01aa\u01ab" + + "\u0007\r\u0000\u0000\u01ab\u01ac\u0005V\u0000\u0000\u01ac\u01ad\u0003" + + "D\"\u0000\u01ad7\u0001\u0000\u0000\u0000\u01ae\u01af\u0007\r\u0000\u0000" + + "\u01af\u01b0\u0007\u000e\u0000\u0000\u01b09\u0001\u0000\u0000\u0000\u01b1" + + "\u01b2\u0005\u0005\u0000\u0000\u01b2\u01b3\u0003\"\u0011\u0000\u01b3\u01b4" + + "\u0005\u0006\u0000\u0000\u01b4;\u0001\u0000\u0000\u0000\u01b5\u01b6\u0005" + + "\u0017\u0000\u0000\u01b6\u01bb\u0003\u001a\r\u0000\u01b7\u01b8\u0005\u0005" + + "\u0000\u0000\u01b8\u01b9\u0003\"\u0011\u0000\u01b9\u01ba\u0005\u0006\u0000" + + "\u0000\u01ba\u01bc\u0001\u0000\u0000\u0000\u01bb\u01b7\u0001\u0000\u0000" + + "\u0000\u01bc\u01bd\u0001\u0000\u0000\u0000\u01bd\u01bb\u0001\u0000\u0000" + + "\u0000\u01bd\u01be\u0001\u0000\u0000\u0000\u01be\u01c6\u0001\u0000\u0000" + + "\u0000\u01bf\u01c3\u00034\u001a\u0000\u01c0\u01c2\u00032\u0019\u0000\u01c1" + + "\u01c0\u0001\u0000\u0000\u0000\u01c2\u01c5\u0001\u0000\u0000\u0000\u01c3" + + "\u01c1\u0001\u0000\u0000\u0000\u01c3\u01c4\u0001\u0000\u0000\u0000\u01c4" + + "\u01c7\u0001\u0000\u0000\u0000\u01c5\u01c3\u0001\u0000\u0000\u0000\u01c6" + + "\u01bf\u0001\u0000\u0000\u0000\u01c6\u01c7\u0001\u0000\u0000\u0000\u01c7" + + "\u01df\u0001\u0000\u0000\u0000\u01c8\u01c9\u0005\u0017\u0000\u0000\u01c9" + + "\u01ca\u0003\u001a\r\u0000\u01ca\u01cb\u0005\u0005\u0000\u0000\u01cb\u01cc" + + "\u0005\u0006\u0000\u0000\u01cc\u01d5\u0005\u0003\u0000\u0000\u01cd\u01d2" + + "\u0003\"\u0011\u0000\u01ce\u01cf\u0005\f\u0000\u0000\u01cf\u01d1\u0003" + + "\"\u0011\u0000\u01d0\u01ce\u0001\u0000\u0000\u0000\u01d1\u01d4\u0001\u0000" + + 
"\u0000\u0000\u01d2\u01d0\u0001\u0000\u0000\u0000\u01d2\u01d3\u0001\u0000" + + "\u0000\u0000\u01d3\u01d6\u0001\u0000\u0000\u0000\u01d4\u01d2\u0001\u0000" + + "\u0000\u0000\u01d5\u01cd\u0001\u0000\u0000\u0000\u01d5\u01d6\u0001\u0000" + + "\u0000\u0000\u01d6\u01d7\u0001\u0000\u0000\u0000\u01d7\u01db\u0005\u0004" + + "\u0000\u0000\u01d8\u01da\u00032\u0019\u0000\u01d9\u01d8\u0001\u0000\u0000" + + "\u0000\u01da\u01dd\u0001\u0000\u0000\u0000\u01db\u01d9\u0001\u0000\u0000" + + "\u0000\u01db\u01dc\u0001\u0000\u0000\u0000\u01dc\u01df\u0001\u0000\u0000" + + "\u0000\u01dd\u01db\u0001\u0000\u0000\u0000\u01de\u01b5\u0001\u0000\u0000" + + "\u0000\u01de\u01c8\u0001\u0000\u0000\u0000\u01df=\u0001\u0000\u0000\u0000" + + "\u01e0\u01e1\u0005\u0005\u0000\u0000\u01e1\u01e6\u0003\"\u0011\u0000\u01e2" + + "\u01e3\u0005\f\u0000\u0000\u01e3\u01e5\u0003\"\u0011\u0000\u01e4\u01e2" + + "\u0001\u0000\u0000\u0000\u01e5\u01e8\u0001\u0000\u0000\u0000\u01e6\u01e4" + + "\u0001\u0000\u0000\u0000\u01e6\u01e7\u0001\u0000\u0000\u0000\u01e7\u01e9" + + "\u0001\u0000\u0000\u0000\u01e8\u01e6\u0001\u0000\u0000\u0000\u01e9\u01ea" + + "\u0005\u0006\u0000\u0000\u01ea\u01ee\u0001\u0000\u0000\u0000\u01eb\u01ec" + + "\u0005\u0005\u0000\u0000\u01ec\u01ee\u0005\u0006\u0000\u0000\u01ed\u01e0" + + "\u0001\u0000\u0000\u0000\u01ed\u01eb\u0001\u0000\u0000\u0000\u01ee?\u0001" + + "\u0000\u0000\u0000\u01ef\u01f0\u0005\u0005\u0000\u0000\u01f0\u01f5\u0003" + + "B!\u0000\u01f1\u01f2\u0005\f\u0000\u0000\u01f2\u01f4\u0003B!\u0000\u01f3" + + "\u01f1\u0001\u0000\u0000\u0000\u01f4\u01f7\u0001\u0000\u0000\u0000\u01f5" + + "\u01f3\u0001\u0000\u0000\u0000\u01f5\u01f6\u0001\u0000\u0000\u0000\u01f6" + + "\u01f8\u0001\u0000\u0000\u0000\u01f7\u01f5\u0001\u0000\u0000\u0000\u01f8" + + "\u01f9\u0005\u0006\u0000\u0000\u01f9\u01fe\u0001\u0000\u0000\u0000\u01fa" + + "\u01fb\u0005\u0005\u0000\u0000\u01fb\u01fc\u00055\u0000\u0000\u01fc\u01fe" + + "\u0005\u0006\u0000\u0000\u01fd\u01ef\u0001\u0000\u0000\u0000\u01fd\u01fa" + + "\u0001\u0000\u0000\u0000\u01feA\u0001\u0000\u0000\u0000\u01ff\u0200\u0003" + + "\"\u0011\u0000\u0200\u0201\u00055\u0000\u0000\u0201\u0202\u0003\"\u0011" + + "\u0000\u0202C\u0001\u0000\u0000\u0000\u0203\u020c\u0005\u0007\u0000\u0000" + + "\u0204\u0209\u0003F#\u0000\u0205\u0206\u0005\f\u0000\u0000\u0206\u0208" + + "\u0003F#\u0000\u0207\u0205\u0001\u0000\u0000\u0000\u0208\u020b\u0001\u0000" + + "\u0000\u0000\u0209\u0207\u0001\u0000\u0000\u0000\u0209\u020a\u0001\u0000" + + "\u0000\u0000\u020a\u020d\u0001\u0000\u0000\u0000\u020b\u0209\u0001\u0000" + + "\u0000\u0000\u020c\u0204\u0001\u0000\u0000\u0000\u020c\u020d\u0001\u0000" + + "\u0000\u0000\u020d\u020e\u0001\u0000\u0000\u0000\u020e\u020f\u0005\b\u0000" + + "\u0000\u020fE\u0001\u0000\u0000\u0000\u0210\u0214\u0003\"\u0011\u0000" + + "\u0211\u0214\u0003H$\u0000\u0212\u0214\u0003L&\u0000\u0213\u0210\u0001" + + "\u0000\u0000\u0000\u0213\u0211\u0001\u0000\u0000\u0000\u0213\u0212\u0001" + + "\u0000\u0000\u0000\u0214G\u0001\u0000\u0000\u0000\u0215\u0223\u0003J%" + + "\u0000\u0216\u021f\u0005\u0007\u0000\u0000\u0217\u021c\u0003J%\u0000\u0218" + + "\u0219\u0005\f\u0000\u0000\u0219\u021b\u0003J%\u0000\u021a\u0218\u0001" + + "\u0000\u0000\u0000\u021b\u021e\u0001\u0000\u0000\u0000\u021c\u021a\u0001" + + "\u0000\u0000\u0000\u021c\u021d\u0001\u0000\u0000\u0000\u021d\u0220\u0001" + + "\u0000\u0000\u0000\u021e\u021c\u0001\u0000\u0000\u0000\u021f\u0217\u0001" + + "\u0000\u0000\u0000\u021f\u0220\u0001\u0000\u0000\u0000\u0220\u0221\u0001" + + "\u0000\u0000\u0000\u0221\u0223\u0005\b\u0000\u0000\u0222\u0215\u0001\u0000" + 
+ "\u0000\u0000\u0222\u0216\u0001\u0000\u0000\u0000\u0223\u0224\u0001\u0000" + + "\u0000\u0000\u0224\u0227\u00058\u0000\u0000\u0225\u0228\u0003\u000e\u0007" + + "\u0000\u0226\u0228\u0003\"\u0011\u0000\u0227\u0225\u0001\u0000\u0000\u0000" + + "\u0227\u0226\u0001\u0000\u0000\u0000\u0228I\u0001\u0000\u0000\u0000\u0229" + + "\u022b\u0003\u0018\f\u0000\u022a\u0229\u0001\u0000\u0000\u0000\u022a\u022b" + + "\u0001\u0000\u0000\u0000\u022b\u022c\u0001\u0000\u0000\u0000\u022c\u022d" + + "\u0005T\u0000\u0000\u022dK\u0001\u0000\u0000\u0000\u022e\u022f\u0003\u0018" + + "\f\u0000\u022f\u0230\u00057\u0000\u0000\u0230\u0231\u0005T\u0000\u0000" + + "\u0231\u023a\u0001\u0000\u0000\u0000\u0232\u0233\u0003\u0018\f\u0000\u0233" + + "\u0234\u00057\u0000\u0000\u0234\u0235\u0005\u0017\u0000\u0000\u0235\u023a" + + "\u0001\u0000\u0000\u0000\u0236\u0237\u0005\u001b\u0000\u0000\u0237\u0238" + + "\u00057\u0000\u0000\u0238\u023a\u0005T\u0000\u0000\u0239\u022e\u0001\u0000" + + "\u0000\u0000\u0239\u0232\u0001\u0000\u0000\u0000\u0239\u0236\u0001\u0000" + + "\u0000\u0000\u023aM\u0001\u0000\u0000\u0000 The return type of the visit operation. Use {@link Void} for * operations with no return type. */ +@SuppressWarnings("CheckReturnValue") class PainlessParserBaseVisitor extends AbstractParseTreeVisitor implements PainlessParserVisitor { /** * {@inheritDoc} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/SuggestLexer.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/SuggestLexer.java index 0bad35d925f2..e9d89d0a9667 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/SuggestLexer.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/SuggestLexer.java @@ -1,17 +1,22 @@ // ANTLR GENERATED CODE: DO NOT EDIT package org.elasticsearch.painless.antlr; -import org.antlr.v4.runtime.*; import org.antlr.v4.runtime.CharStream; import org.antlr.v4.runtime.Lexer; -import org.antlr.v4.runtime.atn.*; +import org.antlr.v4.runtime.RuleContext; +import org.antlr.v4.runtime.RuntimeMetaData; +import org.antlr.v4.runtime.Vocabulary; +import org.antlr.v4.runtime.VocabularyImpl; +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.ATNDeserializer; +import org.antlr.v4.runtime.atn.LexerATNSimulator; +import org.antlr.v4.runtime.atn.PredictionContextCache; import org.antlr.v4.runtime.dfa.DFA; -import org.antlr.v4.runtime.misc.*; -@SuppressWarnings({ "all", "warnings", "unchecked", "unused", "cast" }) +@SuppressWarnings({ "all", "warnings", "unchecked", "unused", "cast", "CheckReturnValue" }) public abstract class SuggestLexer extends Lexer { static { - RuntimeMetaData.checkVersion("4.5.3", RuntimeMetaData.VERSION); + RuntimeMetaData.checkVersion("4.11.1", RuntimeMetaData.VERSION); } protected static final DFA[] _decisionToDFA; @@ -25,266 +30,281 @@ public abstract class SuggestLexer extends Lexer { 68, ALSH = 69, ARSH = 70, AUSH = 71, OCTAL = 72, HEX = 73, INTEGER = 74, DECIMAL = 75, STRING = 76, REGEX = 77, TRUE = 78, FALSE = 79, NULL = 80, ATYPE = 81, TYPE = 82, ID = 83, UNKNOWN = 84, DOTINTEGER = 85, DOTID = 86; public static final int AFTER_DOT = 1; + public static String[] channelNames = { "DEFAULT_TOKEN_CHANNEL", "HIDDEN" }; + public static String[] modeNames = { "DEFAULT_MODE", "AFTER_DOT" }; - public static final String[] ruleNames = { - "WS", - "COMMENT", - "LBRACK", - "RBRACK", - "LBRACE", - "RBRACE", - "LP", - "RP", - "DOT", - "NSDOT", - "COMMA", - "SEMICOLON", - "IF", - "IN", - "ELSE", - "WHILE", - "DO", - 
"FOR", - "CONTINUE", - "BREAK", - "RETURN", - "NEW", - "TRY", - "CATCH", - "THROW", - "THIS", - "INSTANCEOF", - "BOOLNOT", - "BWNOT", - "MUL", - "DIV", - "REM", - "ADD", - "SUB", - "LSH", - "RSH", - "USH", - "LT", - "LTE", - "GT", - "GTE", - "EQ", - "EQR", - "NE", - "NER", - "BWAND", - "XOR", - "BWOR", - "BOOLAND", - "BOOLOR", - "COND", - "COLON", - "ELVIS", - "REF", - "ARROW", - "FIND", - "MATCH", - "INCR", - "DECR", - "ASSIGN", - "AADD", - "ASUB", - "AMUL", - "ADIV", - "AREM", - "AAND", - "AXOR", - "AOR", - "ALSH", - "ARSH", - "AUSH", - "OCTAL", - "HEX", - "INTEGER", - "DECIMAL", - "STRING", - "REGEX", - "TRUE", - "FALSE", - "NULL", - "ATYPE", - "TYPE", - "ID", - "UNKNOWN", - "DOTINTEGER", - "DOTID" }; + private static String[] makeRuleNames() { + return new String[] { + "WS", + "COMMENT", + "LBRACK", + "RBRACK", + "LBRACE", + "RBRACE", + "LP", + "RP", + "DOT", + "NSDOT", + "COMMA", + "SEMICOLON", + "IF", + "IN", + "ELSE", + "WHILE", + "DO", + "FOR", + "CONTINUE", + "BREAK", + "RETURN", + "NEW", + "TRY", + "CATCH", + "THROW", + "THIS", + "INSTANCEOF", + "BOOLNOT", + "BWNOT", + "MUL", + "DIV", + "REM", + "ADD", + "SUB", + "LSH", + "RSH", + "USH", + "LT", + "LTE", + "GT", + "GTE", + "EQ", + "EQR", + "NE", + "NER", + "BWAND", + "XOR", + "BWOR", + "BOOLAND", + "BOOLOR", + "COND", + "COLON", + "ELVIS", + "REF", + "ARROW", + "FIND", + "MATCH", + "INCR", + "DECR", + "ASSIGN", + "AADD", + "ASUB", + "AMUL", + "ADIV", + "AREM", + "AAND", + "AXOR", + "AOR", + "ALSH", + "ARSH", + "AUSH", + "OCTAL", + "HEX", + "INTEGER", + "DECIMAL", + "STRING", + "REGEX", + "TRUE", + "FALSE", + "NULL", + "ATYPE", + "TYPE", + "ID", + "UNKNOWN", + "DOTINTEGER", + "DOTID" }; + } + + public static final String[] ruleNames = makeRuleNames(); + + private static String[] makeLiteralNames() { + return new String[] { + null, + null, + null, + "'{'", + "'}'", + "'['", + "']'", + "'('", + "')'", + "'.'", + "'?.'", + "','", + "';'", + "'if'", + "'in'", + "'else'", + "'while'", + "'do'", + "'for'", + "'continue'", + "'break'", + "'return'", + "'new'", + "'try'", + "'catch'", + "'throw'", + "'this'", + "'instanceof'", + "'!'", + "'~'", + "'*'", + "'/'", + "'%'", + "'+'", + "'-'", + "'<<'", + "'>>'", + "'>>>'", + "'<'", + "'<='", + "'>'", + "'>='", + "'=='", + "'==='", + "'!='", + "'!=='", + "'&'", + "'^'", + "'|'", + "'&&'", + "'||'", + "'?'", + "':'", + "'?:'", + "'::'", + "'->'", + "'=~'", + "'==~'", + "'++'", + "'--'", + "'='", + "'+='", + "'-='", + "'*='", + "'/='", + "'%='", + "'&='", + "'^='", + "'|='", + "'<<='", + "'>>='", + "'>>>='", + null, + null, + null, + null, + null, + null, + "'true'", + "'false'", + "'null'" }; + } + + private static final String[] _LITERAL_NAMES = makeLiteralNames(); - private static final String[] _LITERAL_NAMES = { - null, - null, - null, - "'{'", - "'}'", - "'['", - "']'", - "'('", - "')'", - "'.'", - "'?.'", - "','", - "';'", - "'if'", - "'in'", - "'else'", - "'while'", - "'do'", - "'for'", - "'continue'", - "'break'", - "'return'", - "'new'", - "'try'", - "'catch'", - "'throw'", - "'this'", - "'instanceof'", - "'!'", - "'~'", - "'*'", - "'/'", - "'%'", - "'+'", - "'-'", - "'<<'", - "'>>'", - "'>>>'", - "'<'", - "'<='", - "'>'", - "'>='", - "'=='", - "'==='", - "'!='", - "'!=='", - "'&'", - "'^'", - "'|'", - "'&&'", - "'||'", - "'?'", - "':'", - "'?:'", - "'::'", - "'->'", - "'=~'", - "'==~'", - "'++'", - "'--'", - "'='", - "'+='", - "'-='", - "'*='", - "'/='", - "'%='", - "'&='", - "'^='", - "'|='", - "'<<='", - "'>>='", - "'>>>='", - null, - null, - null, - null, - null, - null, - 
"'true'", - "'false'", - "'null'" }; - private static final String[] _SYMBOLIC_NAMES = { - null, - "WS", - "COMMENT", - "LBRACK", - "RBRACK", - "LBRACE", - "RBRACE", - "LP", - "RP", - "DOT", - "NSDOT", - "COMMA", - "SEMICOLON", - "IF", - "IN", - "ELSE", - "WHILE", - "DO", - "FOR", - "CONTINUE", - "BREAK", - "RETURN", - "NEW", - "TRY", - "CATCH", - "THROW", - "THIS", - "INSTANCEOF", - "BOOLNOT", - "BWNOT", - "MUL", - "DIV", - "REM", - "ADD", - "SUB", - "LSH", - "RSH", - "USH", - "LT", - "LTE", - "GT", - "GTE", - "EQ", - "EQR", - "NE", - "NER", - "BWAND", - "XOR", - "BWOR", - "BOOLAND", - "BOOLOR", - "COND", - "COLON", - "ELVIS", - "REF", - "ARROW", - "FIND", - "MATCH", - "INCR", - "DECR", - "ASSIGN", - "AADD", - "ASUB", - "AMUL", - "ADIV", - "AREM", - "AAND", - "AXOR", - "AOR", - "ALSH", - "ARSH", - "AUSH", - "OCTAL", - "HEX", - "INTEGER", - "DECIMAL", - "STRING", - "REGEX", - "TRUE", - "FALSE", - "NULL", - "ATYPE", - "TYPE", - "ID", - "UNKNOWN", - "DOTINTEGER", - "DOTID" }; + private static String[] makeSymbolicNames() { + return new String[] { + null, + "WS", + "COMMENT", + "LBRACK", + "RBRACK", + "LBRACE", + "RBRACE", + "LP", + "RP", + "DOT", + "NSDOT", + "COMMA", + "SEMICOLON", + "IF", + "IN", + "ELSE", + "WHILE", + "DO", + "FOR", + "CONTINUE", + "BREAK", + "RETURN", + "NEW", + "TRY", + "CATCH", + "THROW", + "THIS", + "INSTANCEOF", + "BOOLNOT", + "BWNOT", + "MUL", + "DIV", + "REM", + "ADD", + "SUB", + "LSH", + "RSH", + "USH", + "LT", + "LTE", + "GT", + "GTE", + "EQ", + "EQR", + "NE", + "NER", + "BWAND", + "XOR", + "BWOR", + "BOOLAND", + "BOOLOR", + "COND", + "COLON", + "ELVIS", + "REF", + "ARROW", + "FIND", + "MATCH", + "INCR", + "DECR", + "ASSIGN", + "AADD", + "ASUB", + "AMUL", + "ADIV", + "AREM", + "AAND", + "AXOR", + "AOR", + "ALSH", + "ARSH", + "AUSH", + "OCTAL", + "HEX", + "INTEGER", + "DECIMAL", + "STRING", + "REGEX", + "TRUE", + "FALSE", + "NULL", + "ATYPE", + "TYPE", + "ID", + "UNKNOWN", + "DOTINTEGER", + "DOTID" }; + } + + private static final String[] _SYMBOLIC_NAMES = makeSymbolicNames(); public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); /** @@ -343,6 +363,11 @@ public String getSerializedATN() { return _serializedATN; } + @Override + public String[] getChannelNames() { + return channelNames; + } + @Override public String[] getModeNames() { return modeNames; @@ -390,221 +415,394 @@ private boolean TYPE_sempred(RuleContext _localctx, int predIndex) { return true; } - public static final String _serializedATN = "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2X\u0267\b\1\b\1\4" - + "\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n" - + "\4\13\t\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22" - + "\t\22\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31" - + "\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t" - + " \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t" - + "+\4,\t,\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64" - + "\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:\4;\t;\4<\t<\4=\t" - + "=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\tC\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4" - + "I\tI\4J\tJ\4K\tK\4L\tL\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\t" - + "T\4U\tU\4V\tV\4W\tW\3\2\6\2\u00b2\n\2\r\2\16\2\u00b3\3\2\3\2\3\3\3\3\3" - + "\3\3\3\7\3\u00bc\n\3\f\3\16\3\u00bf\13\3\3\3\3\3\3\3\3\3\3\3\7\3\u00c6" - + "\n\3\f\3\16\3\u00c9\13\3\3\3\3\3\5\3\u00cd\n\3\3\3\3\3\3\4\3\4\3\5\3\5" - + 
"\3\6\3\6\3\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\13\3" - + "\13\3\f\3\f\3\r\3\r\3\16\3\16\3\16\3\17\3\17\3\17\3\20\3\20\3\20\3\20" - + "\3\20\3\21\3\21\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\23\3\23\3\23\3\23" - + "\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25" - + "\3\25\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\27\3\27\3\27\3\27\3\30\3\30" - + "\3\30\3\30\3\31\3\31\3\31\3\31\3\31\3\31\3\32\3\32\3\32\3\32\3\32\3\32" - + "\3\33\3\33\3\33\3\33\3\33\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34" - + "\3\34\3\34\3\35\3\35\3\36\3\36\3\37\3\37\3 \3 \3 \3!\3!\3\"\3\"\3#\3#" - + "\3$\3$\3$\3%\3%\3%\3&\3&\3&\3&\3\'\3\'\3(\3(\3(\3)\3)\3*\3*\3*\3+\3+\3" - + "+\3,\3,\3,\3,\3-\3-\3-\3.\3.\3.\3.\3/\3/\3\60\3\60\3\61\3\61\3\62\3\62" - + "\3\62\3\63\3\63\3\63\3\64\3\64\3\65\3\65\3\66\3\66\3\66\3\67\3\67\3\67" - + "\38\38\38\39\39\39\3:\3:\3:\3:\3;\3;\3;\3<\3<\3<\3=\3=\3>\3>\3>\3?\3?" - + "\3?\3@\3@\3@\3A\3A\3A\3B\3B\3B\3C\3C\3C\3D\3D\3D\3E\3E\3E\3F\3F\3F\3F" - + "\3G\3G\3G\3G\3H\3H\3H\3H\3H\3I\3I\6I\u01bc\nI\rI\16I\u01bd\3I\5I\u01c1" - + "\nI\3J\3J\3J\6J\u01c6\nJ\rJ\16J\u01c7\3J\5J\u01cb\nJ\3K\3K\3K\7K\u01d0" - + "\nK\fK\16K\u01d3\13K\5K\u01d5\nK\3K\5K\u01d8\nK\3L\3L\3L\7L\u01dd\nL\f" - + "L\16L\u01e0\13L\5L\u01e2\nL\3L\3L\6L\u01e6\nL\rL\16L\u01e7\5L\u01ea\n" - + "L\3L\3L\5L\u01ee\nL\3L\6L\u01f1\nL\rL\16L\u01f2\5L\u01f5\nL\3L\5L\u01f8" - + "\nL\3M\3M\3M\3M\3M\3M\7M\u0200\nM\fM\16M\u0203\13M\3M\3M\3M\3M\3M\3M\3" - + "M\7M\u020c\nM\fM\16M\u020f\13M\3M\5M\u0212\nM\3N\3N\3N\3N\6N\u0218\nN" - + "\rN\16N\u0219\3N\3N\7N\u021e\nN\fN\16N\u0221\13N\3N\3N\3O\3O\3O\3O\3O" - + "\3P\3P\3P\3P\3P\3P\3Q\3Q\3Q\3Q\3Q\3R\3R\3R\3R\6R\u0239\nR\rR\16R\u023a" - + "\3S\3S\3S\3S\7S\u0241\nS\fS\16S\u0244\13S\3S\3S\3T\3T\7T\u024a\nT\fT\16" - + "T\u024d\13T\3U\3U\3U\3U\3V\3V\3V\7V\u0256\nV\fV\16V\u0259\13V\5V\u025b" - + "\nV\3V\3V\3W\3W\7W\u0261\nW\fW\16W\u0264\13W\3W\3W\7\u00bd\u00c7\u0201" - + "\u020d\u0219\2X\4\3\6\4\b\5\n\6\f\7\16\b\20\t\22\n\24\13\26\f\30\r\32" - + "\16\34\17\36\20 \21\"\22$\23&\24(\25*\26,\27.\30\60\31\62\32\64\33\66" - + "\348\35:\36<\37> @!B\"D#F$H%J&L\'N(P)R*T+V,X-Z.\\/^\60`\61b\62d\63f\64" - + "h\65j\66l\67n8p9r:t;v|?~@\u0080A\u0082B\u0084C\u0086D\u0088E\u008a" - + "F\u008cG\u008eH\u0090I\u0092J\u0094K\u0096L\u0098M\u009aN\u009cO\u009e" - + "P\u00a0Q\u00a2R\u00a4S\u00a6T\u00a8U\u00aaV\u00acW\u00aeX\4\2\3\25\5\2" - + "\13\f\17\17\"\"\4\2\f\f\17\17\3\2\629\4\2NNnn\4\2ZZzz\5\2\62;CHch\3\2" - + "\63;\3\2\62;\b\2FFHHNNffhhnn\4\2GGgg\4\2--//\6\2FFHHffhh\4\2$$^^\4\2)" - + ")^^\3\2\f\f\4\2\f\f\61\61\t\2WWeekknouuwwzz\5\2C\\aac|\6\2\62;C\\aac|" - + "\u0288\2\4\3\2\2\2\2\6\3\2\2\2\2\b\3\2\2\2\2\n\3\2\2\2\2\f\3\2\2\2\2\16" - + "\3\2\2\2\2\20\3\2\2\2\2\22\3\2\2\2\2\24\3\2\2\2\2\26\3\2\2\2\2\30\3\2" - + "\2\2\2\32\3\2\2\2\2\34\3\2\2\2\2\36\3\2\2\2\2 \3\2\2\2\2\"\3\2\2\2\2$" - + "\3\2\2\2\2&\3\2\2\2\2(\3\2\2\2\2*\3\2\2\2\2,\3\2\2\2\2.\3\2\2\2\2\60\3" - + "\2\2\2\2\62\3\2\2\2\2\64\3\2\2\2\2\66\3\2\2\2\28\3\2\2\2\2:\3\2\2\2\2" - + "<\3\2\2\2\2>\3\2\2\2\2@\3\2\2\2\2B\3\2\2\2\2D\3\2\2\2\2F\3\2\2\2\2H\3" - + "\2\2\2\2J\3\2\2\2\2L\3\2\2\2\2N\3\2\2\2\2P\3\2\2\2\2R\3\2\2\2\2T\3\2\2" - + "\2\2V\3\2\2\2\2X\3\2\2\2\2Z\3\2\2\2\2\\\3\2\2\2\2^\3\2\2\2\2`\3\2\2\2" - + "\2b\3\2\2\2\2d\3\2\2\2\2f\3\2\2\2\2h\3\2\2\2\2j\3\2\2\2\2l\3\2\2\2\2n" - + "\3\2\2\2\2p\3\2\2\2\2r\3\2\2\2\2t\3\2\2\2\2v\3\2\2\2\2x\3\2\2\2\2z\3\2" - + "\2\2\2|\3\2\2\2\2~\3\2\2\2\2\u0080\3\2\2\2\2\u0082\3\2\2\2\2\u0084\3\2" - + "\2\2\2\u0086\3\2\2\2\2\u0088\3\2\2\2\2\u008a\3\2\2\2\2\u008c\3\2\2\2\2" - + 
"\u008e\3\2\2\2\2\u0090\3\2\2\2\2\u0092\3\2\2\2\2\u0094\3\2\2\2\2\u0096" - + "\3\2\2\2\2\u0098\3\2\2\2\2\u009a\3\2\2\2\2\u009c\3\2\2\2\2\u009e\3\2\2" - + "\2\2\u00a0\3\2\2\2\2\u00a2\3\2\2\2\2\u00a4\3\2\2\2\2\u00a6\3\2\2\2\2\u00a8" - + "\3\2\2\2\2\u00aa\3\2\2\2\3\u00ac\3\2\2\2\3\u00ae\3\2\2\2\4\u00b1\3\2\2" - + "\2\6\u00cc\3\2\2\2\b\u00d0\3\2\2\2\n\u00d2\3\2\2\2\f\u00d4\3\2\2\2\16" - + "\u00d6\3\2\2\2\20\u00d8\3\2\2\2\22\u00da\3\2\2\2\24\u00dc\3\2\2\2\26\u00e0" - + "\3\2\2\2\30\u00e5\3\2\2\2\32\u00e7\3\2\2\2\34\u00e9\3\2\2\2\36\u00ec\3" - + "\2\2\2 \u00ef\3\2\2\2\"\u00f4\3\2\2\2$\u00fa\3\2\2\2&\u00fd\3\2\2\2(\u0101" - + "\3\2\2\2*\u010a\3\2\2\2,\u0110\3\2\2\2.\u0117\3\2\2\2\60\u011b\3\2\2\2" - + "\62\u011f\3\2\2\2\64\u0125\3\2\2\2\66\u012b\3\2\2\28\u0130\3\2\2\2:\u013b" - + "\3\2\2\2<\u013d\3\2\2\2>\u013f\3\2\2\2@\u0141\3\2\2\2B\u0144\3\2\2\2D" - + "\u0146\3\2\2\2F\u0148\3\2\2\2H\u014a\3\2\2\2J\u014d\3\2\2\2L\u0150\3\2" - + "\2\2N\u0154\3\2\2\2P\u0156\3\2\2\2R\u0159\3\2\2\2T\u015b\3\2\2\2V\u015e" - + "\3\2\2\2X\u0161\3\2\2\2Z\u0165\3\2\2\2\\\u0168\3\2\2\2^\u016c\3\2\2\2" - + "`\u016e\3\2\2\2b\u0170\3\2\2\2d\u0172\3\2\2\2f\u0175\3\2\2\2h\u0178\3" - + "\2\2\2j\u017a\3\2\2\2l\u017c\3\2\2\2n\u017f\3\2\2\2p\u0182\3\2\2\2r\u0185" - + "\3\2\2\2t\u0188\3\2\2\2v\u018c\3\2\2\2x\u018f\3\2\2\2z\u0192\3\2\2\2|" - + "\u0194\3\2\2\2~\u0197\3\2\2\2\u0080\u019a\3\2\2\2\u0082\u019d\3\2\2\2" - + "\u0084\u01a0\3\2\2\2\u0086\u01a3\3\2\2\2\u0088\u01a6\3\2\2\2\u008a\u01a9" - + "\3\2\2\2\u008c\u01ac\3\2\2\2\u008e\u01b0\3\2\2\2\u0090\u01b4\3\2\2\2\u0092" - + "\u01b9\3\2\2\2\u0094\u01c2\3\2\2\2\u0096\u01d4\3\2\2\2\u0098\u01e1\3\2" - + "\2\2\u009a\u0211\3\2\2\2\u009c\u0213\3\2\2\2\u009e\u0224\3\2\2\2\u00a0" - + "\u0229\3\2\2\2\u00a2\u022f\3\2\2\2\u00a4\u0234\3\2\2\2\u00a6\u023c\3\2" - + "\2\2\u00a8\u0247\3\2\2\2\u00aa\u024e\3\2\2\2\u00ac\u025a\3\2\2\2\u00ae" - + "\u025e\3\2\2\2\u00b0\u00b2\t\2\2\2\u00b1\u00b0\3\2\2\2\u00b2\u00b3\3\2" - + "\2\2\u00b3\u00b1\3\2\2\2\u00b3\u00b4\3\2\2\2\u00b4\u00b5\3\2\2\2\u00b5" - + "\u00b6\b\2\2\2\u00b6\5\3\2\2\2\u00b7\u00b8\7\61\2\2\u00b8\u00b9\7\61\2" - + "\2\u00b9\u00bd\3\2\2\2\u00ba\u00bc\13\2\2\2\u00bb\u00ba\3\2\2\2\u00bc" - + "\u00bf\3\2\2\2\u00bd\u00be\3\2\2\2\u00bd\u00bb\3\2\2\2\u00be\u00c0\3\2" - + "\2\2\u00bf\u00bd\3\2\2\2\u00c0\u00cd\t\3\2\2\u00c1\u00c2\7\61\2\2\u00c2" - + "\u00c3\7,\2\2\u00c3\u00c7\3\2\2\2\u00c4\u00c6\13\2\2\2\u00c5\u00c4\3\2" - + "\2\2\u00c6\u00c9\3\2\2\2\u00c7\u00c8\3\2\2\2\u00c7\u00c5\3\2\2\2\u00c8" - + "\u00ca\3\2\2\2\u00c9\u00c7\3\2\2\2\u00ca\u00cb\7,\2\2\u00cb\u00cd\7\61" - + "\2\2\u00cc\u00b7\3\2\2\2\u00cc\u00c1\3\2\2\2\u00cd\u00ce\3\2\2\2\u00ce" - + "\u00cf\b\3\2\2\u00cf\7\3\2\2\2\u00d0\u00d1\7}\2\2\u00d1\t\3\2\2\2\u00d2" - + "\u00d3\7\177\2\2\u00d3\13\3\2\2\2\u00d4\u00d5\7]\2\2\u00d5\r\3\2\2\2\u00d6" - + "\u00d7\7_\2\2\u00d7\17\3\2\2\2\u00d8\u00d9\7*\2\2\u00d9\21\3\2\2\2\u00da" - + "\u00db\7+\2\2\u00db\23\3\2\2\2\u00dc\u00dd\7\60\2\2\u00dd\u00de\3\2\2" - + "\2\u00de\u00df\b\n\3\2\u00df\25\3\2\2\2\u00e0\u00e1\7A\2\2\u00e1\u00e2" - + "\7\60\2\2\u00e2\u00e3\3\2\2\2\u00e3\u00e4\b\13\3\2\u00e4\27\3\2\2\2\u00e5" - + "\u00e6\7.\2\2\u00e6\31\3\2\2\2\u00e7\u00e8\7=\2\2\u00e8\33\3\2\2\2\u00e9" - + "\u00ea\7k\2\2\u00ea\u00eb\7h\2\2\u00eb\35\3\2\2\2\u00ec\u00ed\7k\2\2\u00ed" - + "\u00ee\7p\2\2\u00ee\37\3\2\2\2\u00ef\u00f0\7g\2\2\u00f0\u00f1\7n\2\2\u00f1" - + "\u00f2\7u\2\2\u00f2\u00f3\7g\2\2\u00f3!\3\2\2\2\u00f4\u00f5\7y\2\2\u00f5" - + "\u00f6\7j\2\2\u00f6\u00f7\7k\2\2\u00f7\u00f8\7n\2\2\u00f8\u00f9\7g\2\2" - + 
"\u00f9#\3\2\2\2\u00fa\u00fb\7f\2\2\u00fb\u00fc\7q\2\2\u00fc%\3\2\2\2\u00fd" - + "\u00fe\7h\2\2\u00fe\u00ff\7q\2\2\u00ff\u0100\7t\2\2\u0100\'\3\2\2\2\u0101" - + "\u0102\7e\2\2\u0102\u0103\7q\2\2\u0103\u0104\7p\2\2\u0104\u0105\7v\2\2" - + "\u0105\u0106\7k\2\2\u0106\u0107\7p\2\2\u0107\u0108\7w\2\2\u0108\u0109" - + "\7g\2\2\u0109)\3\2\2\2\u010a\u010b\7d\2\2\u010b\u010c\7t\2\2\u010c\u010d" - + "\7g\2\2\u010d\u010e\7c\2\2\u010e\u010f\7m\2\2\u010f+\3\2\2\2\u0110\u0111" - + "\7t\2\2\u0111\u0112\7g\2\2\u0112\u0113\7v\2\2\u0113\u0114\7w\2\2\u0114" - + "\u0115\7t\2\2\u0115\u0116\7p\2\2\u0116-\3\2\2\2\u0117\u0118\7p\2\2\u0118" - + "\u0119\7g\2\2\u0119\u011a\7y\2\2\u011a/\3\2\2\2\u011b\u011c\7v\2\2\u011c" - + "\u011d\7t\2\2\u011d\u011e\7{\2\2\u011e\61\3\2\2\2\u011f\u0120\7e\2\2\u0120" - + "\u0121\7c\2\2\u0121\u0122\7v\2\2\u0122\u0123\7e\2\2\u0123\u0124\7j\2\2" - + "\u0124\63\3\2\2\2\u0125\u0126\7v\2\2\u0126\u0127\7j\2\2\u0127\u0128\7" - + "t\2\2\u0128\u0129\7q\2\2\u0129\u012a\7y\2\2\u012a\65\3\2\2\2\u012b\u012c" - + "\7v\2\2\u012c\u012d\7j\2\2\u012d\u012e\7k\2\2\u012e\u012f\7u\2\2\u012f" - + "\67\3\2\2\2\u0130\u0131\7k\2\2\u0131\u0132\7p\2\2\u0132\u0133\7u\2\2\u0133" - + "\u0134\7v\2\2\u0134\u0135\7c\2\2\u0135\u0136\7p\2\2\u0136\u0137\7e\2\2" - + "\u0137\u0138\7g\2\2\u0138\u0139\7q\2\2\u0139\u013a\7h\2\2\u013a9\3\2\2" - + "\2\u013b\u013c\7#\2\2\u013c;\3\2\2\2\u013d\u013e\7\u0080\2\2\u013e=\3" - + "\2\2\2\u013f\u0140\7,\2\2\u0140?\3\2\2\2\u0141\u0142\7\61\2\2\u0142\u0143" - + "\6 \2\2\u0143A\3\2\2\2\u0144\u0145\7\'\2\2\u0145C\3\2\2\2\u0146\u0147" - + "\7-\2\2\u0147E\3\2\2\2\u0148\u0149\7/\2\2\u0149G\3\2\2\2\u014a\u014b\7" - + ">\2\2\u014b\u014c\7>\2\2\u014cI\3\2\2\2\u014d\u014e\7@\2\2\u014e\u014f" - + "\7@\2\2\u014fK\3\2\2\2\u0150\u0151\7@\2\2\u0151\u0152\7@\2\2\u0152\u0153" - + "\7@\2\2\u0153M\3\2\2\2\u0154\u0155\7>\2\2\u0155O\3\2\2\2\u0156\u0157\7" - + ">\2\2\u0157\u0158\7?\2\2\u0158Q\3\2\2\2\u0159\u015a\7@\2\2\u015aS\3\2" - + "\2\2\u015b\u015c\7@\2\2\u015c\u015d\7?\2\2\u015dU\3\2\2\2\u015e\u015f" - + "\7?\2\2\u015f\u0160\7?\2\2\u0160W\3\2\2\2\u0161\u0162\7?\2\2\u0162\u0163" - + "\7?\2\2\u0163\u0164\7?\2\2\u0164Y\3\2\2\2\u0165\u0166\7#\2\2\u0166\u0167" - + "\7?\2\2\u0167[\3\2\2\2\u0168\u0169\7#\2\2\u0169\u016a\7?\2\2\u016a\u016b" - + "\7?\2\2\u016b]\3\2\2\2\u016c\u016d\7(\2\2\u016d_\3\2\2\2\u016e\u016f\7" - + "`\2\2\u016fa\3\2\2\2\u0170\u0171\7~\2\2\u0171c\3\2\2\2\u0172\u0173\7(" - + "\2\2\u0173\u0174\7(\2\2\u0174e\3\2\2\2\u0175\u0176\7~\2\2\u0176\u0177" - + "\7~\2\2\u0177g\3\2\2\2\u0178\u0179\7A\2\2\u0179i\3\2\2\2\u017a\u017b\7" - + "<\2\2\u017bk\3\2\2\2\u017c\u017d\7A\2\2\u017d\u017e\7<\2\2\u017em\3\2" - + "\2\2\u017f\u0180\7<\2\2\u0180\u0181\7<\2\2\u0181o\3\2\2\2\u0182\u0183" - + "\7/\2\2\u0183\u0184\7@\2\2\u0184q\3\2\2\2\u0185\u0186\7?\2\2\u0186\u0187" - + "\7\u0080\2\2\u0187s\3\2\2\2\u0188\u0189\7?\2\2\u0189\u018a\7?\2\2\u018a" - + "\u018b\7\u0080\2\2\u018bu\3\2\2\2\u018c\u018d\7-\2\2\u018d\u018e\7-\2" - + "\2\u018ew\3\2\2\2\u018f\u0190\7/\2\2\u0190\u0191\7/\2\2\u0191y\3\2\2\2" - + "\u0192\u0193\7?\2\2\u0193{\3\2\2\2\u0194\u0195\7-\2\2\u0195\u0196\7?\2" - + "\2\u0196}\3\2\2\2\u0197\u0198\7/\2\2\u0198\u0199\7?\2\2\u0199\177\3\2" - + "\2\2\u019a\u019b\7,\2\2\u019b\u019c\7?\2\2\u019c\u0081\3\2\2\2\u019d\u019e" - + "\7\61\2\2\u019e\u019f\7?\2\2\u019f\u0083\3\2\2\2\u01a0\u01a1\7\'\2\2\u01a1" - + "\u01a2\7?\2\2\u01a2\u0085\3\2\2\2\u01a3\u01a4\7(\2\2\u01a4\u01a5\7?\2" - + "\2\u01a5\u0087\3\2\2\2\u01a6\u01a7\7`\2\2\u01a7\u01a8\7?\2\2\u01a8\u0089" - + 
"\3\2\2\2\u01a9\u01aa\7~\2\2\u01aa\u01ab\7?\2\2\u01ab\u008b\3\2\2\2\u01ac" - + "\u01ad\7>\2\2\u01ad\u01ae\7>\2\2\u01ae\u01af\7?\2\2\u01af\u008d\3\2\2" - + "\2\u01b0\u01b1\7@\2\2\u01b1\u01b2\7@\2\2\u01b2\u01b3\7?\2\2\u01b3\u008f" - + "\3\2\2\2\u01b4\u01b5\7@\2\2\u01b5\u01b6\7@\2\2\u01b6\u01b7\7@\2\2\u01b7" - + "\u01b8\7?\2\2\u01b8\u0091\3\2\2\2\u01b9\u01bb\7\62\2\2\u01ba\u01bc\t\4" - + "\2\2\u01bb\u01ba\3\2\2\2\u01bc\u01bd\3\2\2\2\u01bd\u01bb\3\2\2\2\u01bd" - + "\u01be\3\2\2\2\u01be\u01c0\3\2\2\2\u01bf\u01c1\t\5\2\2\u01c0\u01bf\3\2" - + "\2\2\u01c0\u01c1\3\2\2\2\u01c1\u0093\3\2\2\2\u01c2\u01c3\7\62\2\2\u01c3" - + "\u01c5\t\6\2\2\u01c4\u01c6\t\7\2\2\u01c5\u01c4\3\2\2\2\u01c6\u01c7\3\2" - + "\2\2\u01c7\u01c5\3\2\2\2\u01c7\u01c8\3\2\2\2\u01c8\u01ca\3\2\2\2\u01c9" - + "\u01cb\t\5\2\2\u01ca\u01c9\3\2\2\2\u01ca\u01cb\3\2\2\2\u01cb\u0095\3\2" - + "\2\2\u01cc\u01d5\7\62\2\2\u01cd\u01d1\t\b\2\2\u01ce\u01d0\t\t\2\2\u01cf" - + "\u01ce\3\2\2\2\u01d0\u01d3\3\2\2\2\u01d1\u01cf\3\2\2\2\u01d1\u01d2\3\2" - + "\2\2\u01d2\u01d5\3\2\2\2\u01d3\u01d1\3\2\2\2\u01d4\u01cc\3\2\2\2\u01d4" - + "\u01cd\3\2\2\2\u01d5\u01d7\3\2\2\2\u01d6\u01d8\t\n\2\2\u01d7\u01d6\3\2" - + "\2\2\u01d7\u01d8\3\2\2\2\u01d8\u0097\3\2\2\2\u01d9\u01e2\7\62\2\2\u01da" - + "\u01de\t\b\2\2\u01db\u01dd\t\t\2\2\u01dc\u01db\3\2\2\2\u01dd\u01e0\3\2" - + "\2\2\u01de\u01dc\3\2\2\2\u01de\u01df\3\2\2\2\u01df\u01e2\3\2\2\2\u01e0" - + "\u01de\3\2\2\2\u01e1\u01d9\3\2\2\2\u01e1\u01da\3\2\2\2\u01e2\u01e9\3\2" - + "\2\2\u01e3\u01e5\5\24\n\2\u01e4\u01e6\t\t\2\2\u01e5\u01e4\3\2\2\2\u01e6" - + "\u01e7\3\2\2\2\u01e7\u01e5\3\2\2\2\u01e7\u01e8\3\2\2\2\u01e8\u01ea\3\2" - + "\2\2\u01e9\u01e3\3\2\2\2\u01e9\u01ea\3\2\2\2\u01ea\u01f4\3\2\2\2\u01eb" - + "\u01ed\t\13\2\2\u01ec\u01ee\t\f\2\2\u01ed\u01ec\3\2\2\2\u01ed\u01ee\3" - + "\2\2\2\u01ee\u01f0\3\2\2\2\u01ef\u01f1\t\t\2\2\u01f0\u01ef\3\2\2\2\u01f1" - + "\u01f2\3\2\2\2\u01f2\u01f0\3\2\2\2\u01f2\u01f3\3\2\2\2\u01f3\u01f5\3\2" - + "\2\2\u01f4\u01eb\3\2\2\2\u01f4\u01f5\3\2\2\2\u01f5\u01f7\3\2\2\2\u01f6" - + "\u01f8\t\r\2\2\u01f7\u01f6\3\2\2\2\u01f7\u01f8\3\2\2\2\u01f8\u0099\3\2" - + "\2\2\u01f9\u0201\7$\2\2\u01fa\u01fb\7^\2\2\u01fb\u0200\7$\2\2\u01fc\u01fd" - + "\7^\2\2\u01fd\u0200\7^\2\2\u01fe\u0200\n\16\2\2\u01ff\u01fa\3\2\2\2\u01ff" - + "\u01fc\3\2\2\2\u01ff\u01fe\3\2\2\2\u0200\u0203\3\2\2\2\u0201\u0202\3\2" - + "\2\2\u0201\u01ff\3\2\2\2\u0202\u0204\3\2\2\2\u0203\u0201\3\2\2\2\u0204" - + "\u0212\7$\2\2\u0205\u020d\7)\2\2\u0206\u0207\7^\2\2\u0207\u020c\7)\2\2" - + "\u0208\u0209\7^\2\2\u0209\u020c\7^\2\2\u020a\u020c\n\17\2\2\u020b\u0206" - + "\3\2\2\2\u020b\u0208\3\2\2\2\u020b\u020a\3\2\2\2\u020c\u020f\3\2\2\2\u020d" - + "\u020e\3\2\2\2\u020d\u020b\3\2\2\2\u020e\u0210\3\2\2\2\u020f\u020d\3\2" - + "\2\2\u0210\u0212\7)\2\2\u0211\u01f9\3\2\2\2\u0211\u0205\3\2\2\2\u0212" - + "\u009b\3\2\2\2\u0213\u0217\7\61\2\2\u0214\u0215\7^\2\2\u0215\u0218\n\20" - + "\2\2\u0216\u0218\n\21\2\2\u0217\u0214\3\2\2\2\u0217\u0216\3\2\2\2\u0218" - + "\u0219\3\2\2\2\u0219\u021a\3\2\2\2\u0219\u0217\3\2\2\2\u021a\u021b\3\2" - + "\2\2\u021b\u021f\7\61\2\2\u021c\u021e\t\22\2\2\u021d\u021c\3\2\2\2\u021e" - + "\u0221\3\2\2\2\u021f\u021d\3\2\2\2\u021f\u0220\3\2\2\2\u0220\u0222\3\2" - + "\2\2\u0221\u021f\3\2\2\2\u0222\u0223\6N\3\2\u0223\u009d\3\2\2\2\u0224" - + "\u0225\7v\2\2\u0225\u0226\7t\2\2\u0226\u0227\7w\2\2\u0227\u0228\7g\2\2" - + "\u0228\u009f\3\2\2\2\u0229\u022a\7h\2\2\u022a\u022b\7c\2\2\u022b\u022c" - + "\7n\2\2\u022c\u022d\7u\2\2\u022d\u022e\7g\2\2\u022e\u00a1\3\2\2\2\u022f" - + 
"\u0230\7p\2\2\u0230\u0231\7w\2\2\u0231\u0232\7n\2\2\u0232\u0233\7n\2\2" - + "\u0233\u00a3\3\2\2\2\u0234\u0238\5\u00a6S\2\u0235\u0236\5\f\6\2\u0236" - + "\u0237\5\16\7\2\u0237\u0239\3\2\2\2\u0238\u0235\3\2\2\2\u0239\u023a\3" - + "\2\2\2\u023a\u0238\3\2\2\2\u023a\u023b\3\2\2\2\u023b\u00a5\3\2\2\2\u023c" - + "\u0242\5\u00a8T\2\u023d\u023e\5\24\n\2\u023e\u023f\5\u00a8T\2\u023f\u0241" - + "\3\2\2\2\u0240\u023d\3\2\2\2\u0241\u0244\3\2\2\2\u0242\u0240\3\2\2\2\u0242" - + "\u0243\3\2\2\2\u0243\u0245\3\2\2\2\u0244\u0242\3\2\2\2\u0245\u0246\6S" - + "\4\2\u0246\u00a7\3\2\2\2\u0247\u024b\t\23\2\2\u0248\u024a\t\24\2\2\u0249" - + "\u0248\3\2\2\2\u024a\u024d\3\2\2\2\u024b\u0249\3\2\2\2\u024b\u024c\3\2" - + "\2\2\u024c\u00a9\3\2\2\2\u024d\u024b\3\2\2\2\u024e\u024f\13\2\2\2\u024f" - + "\u0250\3\2\2\2\u0250\u0251\bU\2\2\u0251\u00ab\3\2\2\2\u0252\u025b\7\62" - + "\2\2\u0253\u0257\t\b\2\2\u0254\u0256\t\t\2\2\u0255\u0254\3\2\2\2\u0256" - + "\u0259\3\2\2\2\u0257\u0255\3\2\2\2\u0257\u0258\3\2\2\2\u0258\u025b\3\2" - + "\2\2\u0259\u0257\3\2\2\2\u025a\u0252\3\2\2\2\u025a\u0253\3\2\2\2\u025b" - + "\u025c\3\2\2\2\u025c\u025d\bV\4\2\u025d\u00ad\3\2\2\2\u025e\u0262\t\23" - + "\2\2\u025f\u0261\t\24\2\2\u0260\u025f\3\2\2\2\u0261\u0264\3\2\2\2\u0262" - + "\u0260\3\2\2\2\u0262\u0263\3\2\2\2\u0263\u0265\3\2\2\2\u0264\u0262\3\2" - + "\2\2\u0265\u0266\bW\4\2\u0266\u00af\3\2\2\2%\2\3\u00b3\u00bd\u00c7\u00cc" - + "\u01bd\u01c0\u01c7\u01ca\u01d1\u01d4\u01d7\u01de\u01e1\u01e7\u01e9\u01ed" - + "\u01f2\u01f4\u01f7\u01ff\u0201\u020b\u020d\u0211\u0217\u0219\u021f\u023a" - + "\u0242\u024b\u0257\u025a\u0262\5\b\2\2\4\3\2\4\2\2"; + public static final String _serializedATN = "\u0004\u0000V\u0265\u0006\uffff\uffff\u0006\uffff\uffff\u0002\u0000\u0007" + + "\u0000\u0002\u0001\u0007\u0001\u0002\u0002\u0007\u0002\u0002\u0003\u0007" + + "\u0003\u0002\u0004\u0007\u0004\u0002\u0005\u0007\u0005\u0002\u0006\u0007" + + "\u0006\u0002\u0007\u0007\u0007\u0002\b\u0007\b\u0002\t\u0007\t\u0002\n" + + "\u0007\n\u0002\u000b\u0007\u000b\u0002\f\u0007\f\u0002\r\u0007\r\u0002" + + "\u000e\u0007\u000e\u0002\u000f\u0007\u000f\u0002\u0010\u0007\u0010\u0002" + + "\u0011\u0007\u0011\u0002\u0012\u0007\u0012\u0002\u0013\u0007\u0013\u0002" + + "\u0014\u0007\u0014\u0002\u0015\u0007\u0015\u0002\u0016\u0007\u0016\u0002" + + "\u0017\u0007\u0017\u0002\u0018\u0007\u0018\u0002\u0019\u0007\u0019\u0002" + + "\u001a\u0007\u001a\u0002\u001b\u0007\u001b\u0002\u001c\u0007\u001c\u0002" + + "\u001d\u0007\u001d\u0002\u001e\u0007\u001e\u0002\u001f\u0007\u001f\u0002" + + " \u0007 \u0002!\u0007!\u0002\"\u0007\"\u0002#\u0007#\u0002$\u0007$\u0002" + + "%\u0007%\u0002&\u0007&\u0002\'\u0007\'\u0002(\u0007(\u0002)\u0007)\u0002" + + "*\u0007*\u0002+\u0007+\u0002,\u0007,\u0002-\u0007-\u0002.\u0007.\u0002" + + "/\u0007/\u00020\u00070\u00021\u00071\u00022\u00072\u00023\u00073\u0002" + + "4\u00074\u00025\u00075\u00026\u00076\u00027\u00077\u00028\u00078\u0002" + + "9\u00079\u0002:\u0007:\u0002;\u0007;\u0002<\u0007<\u0002=\u0007=\u0002" + + ">\u0007>\u0002?\u0007?\u0002@\u0007@\u0002A\u0007A\u0002B\u0007B\u0002" + + "C\u0007C\u0002D\u0007D\u0002E\u0007E\u0002F\u0007F\u0002G\u0007G\u0002" + + "H\u0007H\u0002I\u0007I\u0002J\u0007J\u0002K\u0007K\u0002L\u0007L\u0002" + + "M\u0007M\u0002N\u0007N\u0002O\u0007O\u0002P\u0007P\u0002Q\u0007Q\u0002" + + "R\u0007R\u0002S\u0007S\u0002T\u0007T\u0002U\u0007U\u0001\u0000\u0004\u0000" + + "\u00b0\b\u0000\u000b\u0000\f\u0000\u00b1\u0001\u0000\u0001\u0000\u0001" + + "\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0005\u0001\u00ba\b\u0001\n" + + 
"\u0001\f\u0001\u00bd\t\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001" + + "\u0001\u0001\u0001\u0005\u0001\u00c4\b\u0001\n\u0001\f\u0001\u00c7\t\u0001" + + "\u0001\u0001\u0001\u0001\u0003\u0001\u00cb\b\u0001\u0001\u0001\u0001\u0001" + + "\u0001\u0002\u0001\u0002\u0001\u0003\u0001\u0003\u0001\u0004\u0001\u0004" + + "\u0001\u0005\u0001\u0005\u0001\u0006\u0001\u0006\u0001\u0007\u0001\u0007" + + "\u0001\b\u0001\b\u0001\b\u0001\b\u0001\t\u0001\t\u0001\t\u0001\t\u0001" + + "\t\u0001\n\u0001\n\u0001\u000b\u0001\u000b\u0001\f\u0001\f\u0001\f\u0001" + + "\r\u0001\r\u0001\r\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000e\u0001" + + "\u000e\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001" + + "\u000f\u0001\u0010\u0001\u0010\u0001\u0010\u0001\u0011\u0001\u0011\u0001" + + "\u0011\u0001\u0011\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001" + + "\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0013\u0001" + + "\u0013\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0014\u0001" + + "\u0014\u0001\u0014\u0001\u0014\u0001\u0014\u0001\u0014\u0001\u0014\u0001" + + "\u0015\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0016\u0001\u0016\u0001" + + "\u0016\u0001\u0016\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001" + + "\u0017\u0001\u0017\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001" + + "\u0018\u0001\u0018\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0001" + + "\u0019\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001" + + "\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001" + + "\u001b\u0001\u001b\u0001\u001c\u0001\u001c\u0001\u001d\u0001\u001d\u0001" + + "\u001e\u0001\u001e\u0001\u001e\u0001\u001f\u0001\u001f\u0001 \u0001 \u0001" + + "!\u0001!\u0001\"\u0001\"\u0001\"\u0001#\u0001#\u0001#\u0001$\u0001$\u0001" + + "$\u0001$\u0001%\u0001%\u0001&\u0001&\u0001&\u0001\'\u0001\'\u0001(\u0001" + + "(\u0001(\u0001)\u0001)\u0001)\u0001*\u0001*\u0001*\u0001*\u0001+\u0001" + + "+\u0001+\u0001,\u0001,\u0001,\u0001,\u0001-\u0001-\u0001.\u0001.\u0001" + + "/\u0001/\u00010\u00010\u00010\u00011\u00011\u00011\u00012\u00012\u0001" + + "3\u00013\u00014\u00014\u00014\u00015\u00015\u00015\u00016\u00016\u0001" + + "6\u00017\u00017\u00017\u00018\u00018\u00018\u00018\u00019\u00019\u0001" + + "9\u0001:\u0001:\u0001:\u0001;\u0001;\u0001<\u0001<\u0001<\u0001=\u0001" + + "=\u0001=\u0001>\u0001>\u0001>\u0001?\u0001?\u0001?\u0001@\u0001@\u0001" + + "@\u0001A\u0001A\u0001A\u0001B\u0001B\u0001B\u0001C\u0001C\u0001C\u0001" + + "D\u0001D\u0001D\u0001D\u0001E\u0001E\u0001E\u0001E\u0001F\u0001F\u0001" + + "F\u0001F\u0001F\u0001G\u0001G\u0004G\u01ba\bG\u000bG\fG\u01bb\u0001G\u0003" + + "G\u01bf\bG\u0001H\u0001H\u0001H\u0004H\u01c4\bH\u000bH\fH\u01c5\u0001" + + "H\u0003H\u01c9\bH\u0001I\u0001I\u0001I\u0005I\u01ce\bI\nI\fI\u01d1\tI" + + "\u0003I\u01d3\bI\u0001I\u0003I\u01d6\bI\u0001J\u0001J\u0001J\u0005J\u01db" + + "\bJ\nJ\fJ\u01de\tJ\u0003J\u01e0\bJ\u0001J\u0001J\u0004J\u01e4\bJ\u000b" + + "J\fJ\u01e5\u0003J\u01e8\bJ\u0001J\u0001J\u0003J\u01ec\bJ\u0001J\u0004" + + "J\u01ef\bJ\u000bJ\fJ\u01f0\u0003J\u01f3\bJ\u0001J\u0003J\u01f6\bJ\u0001" + + "K\u0001K\u0001K\u0001K\u0001K\u0001K\u0005K\u01fe\bK\nK\fK\u0201\tK\u0001" + + "K\u0001K\u0001K\u0001K\u0001K\u0001K\u0001K\u0005K\u020a\bK\nK\fK\u020d" + + "\tK\u0001K\u0003K\u0210\bK\u0001L\u0001L\u0001L\u0001L\u0004L\u0216\b" + + "L\u000bL\fL\u0217\u0001L\u0001L\u0005L\u021c\bL\nL\fL\u021f\tL\u0001L" + + "\u0001L\u0001M\u0001M\u0001M\u0001M\u0001M\u0001N\u0001N\u0001N\u0001" + + 
"N\u0001N\u0001N\u0001O\u0001O\u0001O\u0001O\u0001O\u0001P\u0001P\u0001" + + "P\u0001P\u0004P\u0237\bP\u000bP\fP\u0238\u0001Q\u0001Q\u0001Q\u0001Q\u0005" + + "Q\u023f\bQ\nQ\fQ\u0242\tQ\u0001Q\u0001Q\u0001R\u0001R\u0005R\u0248\bR" + + "\nR\fR\u024b\tR\u0001S\u0001S\u0001S\u0001S\u0001T\u0001T\u0001T\u0005" + + "T\u0254\bT\nT\fT\u0257\tT\u0003T\u0259\bT\u0001T\u0001T\u0001U\u0001U" + + "\u0005U\u025f\bU\nU\fU\u0262\tU\u0001U\u0001U\u0005\u00bb\u00c5\u01ff" + + "\u020b\u0217\u0000V\u0002\u0001\u0004\u0002\u0006\u0003\b\u0004\n\u0005" + + "\f\u0006\u000e\u0007\u0010\b\u0012\t\u0014\n\u0016\u000b\u0018\f\u001a" + + "\r\u001c\u000e\u001e\u000f \u0010\"\u0011$\u0012&\u0013(\u0014*\u0015" + + ",\u0016.\u00170\u00182\u00194\u001a6\u001b8\u001c:\u001d<\u001e>\u001f" + + "@ B!D\"F#H$J%L&N\'P(R)T*V+X,Z-\\.^/`0b1d2f3h4j5l6n7p8r9t:v;x~?\u0080" + + "@\u0082A\u0084B\u0086C\u0088D\u008aE\u008cF\u008eG\u0090H\u0092I\u0094" + + "J\u0096K\u0098L\u009aM\u009cN\u009eO\u00a0P\u00a2Q\u00a4R\u00a6S\u00a8" + + "T\u00aaU\u00acV\u0002\u0000\u0001\u0013\u0003\u0000\t\n\r\r \u0002\u0000" + + "\n\n\r\r\u0001\u000007\u0002\u0000LLll\u0002\u0000XXxx\u0003\u000009A" + + "Faf\u0001\u000019\u0001\u000009\u0006\u0000DDFFLLddffll\u0002\u0000EE" + + "ee\u0002\u0000++--\u0004\u0000DDFFddff\u0002\u0000\"\"\\\\\u0002\u0000" + + "\'\'\\\\\u0001\u0000\n\n\u0002\u0000\n\n//\u0007\u0000UUcciilmssuuxx\u0003" + + "\u0000AZ__az\u0004\u000009AZ__az\u0286\u0000\u0002\u0001\u0000\u0000\u0000" + + "\u0000\u0004\u0001\u0000\u0000\u0000\u0000\u0006\u0001\u0000\u0000\u0000" + + "\u0000\b\u0001\u0000\u0000\u0000\u0000\n\u0001\u0000\u0000\u0000\u0000" + + "\f\u0001\u0000\u0000\u0000\u0000\u000e\u0001\u0000\u0000\u0000\u0000\u0010" + + "\u0001\u0000\u0000\u0000\u0000\u0012\u0001\u0000\u0000\u0000\u0000\u0014" + + "\u0001\u0000\u0000\u0000\u0000\u0016\u0001\u0000\u0000\u0000\u0000\u0018" + + "\u0001\u0000\u0000\u0000\u0000\u001a\u0001\u0000\u0000\u0000\u0000\u001c" + + "\u0001\u0000\u0000\u0000\u0000\u001e\u0001\u0000\u0000\u0000\u0000 \u0001" + + "\u0000\u0000\u0000\u0000\"\u0001\u0000\u0000\u0000\u0000$\u0001\u0000" + + "\u0000\u0000\u0000&\u0001\u0000\u0000\u0000\u0000(\u0001\u0000\u0000\u0000" + + "\u0000*\u0001\u0000\u0000\u0000\u0000,\u0001\u0000\u0000\u0000\u0000." 
+ + "\u0001\u0000\u0000\u0000\u00000\u0001\u0000\u0000\u0000\u00002\u0001\u0000" + + "\u0000\u0000\u00004\u0001\u0000\u0000\u0000\u00006\u0001\u0000\u0000\u0000" + + "\u00008\u0001\u0000\u0000\u0000\u0000:\u0001\u0000\u0000\u0000\u0000<" + + "\u0001\u0000\u0000\u0000\u0000>\u0001\u0000\u0000\u0000\u0000@\u0001\u0000" + + "\u0000\u0000\u0000B\u0001\u0000\u0000\u0000\u0000D\u0001\u0000\u0000\u0000" + + "\u0000F\u0001\u0000\u0000\u0000\u0000H\u0001\u0000\u0000\u0000\u0000J" + + "\u0001\u0000\u0000\u0000\u0000L\u0001\u0000\u0000\u0000\u0000N\u0001\u0000" + + "\u0000\u0000\u0000P\u0001\u0000\u0000\u0000\u0000R\u0001\u0000\u0000\u0000" + + "\u0000T\u0001\u0000\u0000\u0000\u0000V\u0001\u0000\u0000\u0000\u0000X" + + "\u0001\u0000\u0000\u0000\u0000Z\u0001\u0000\u0000\u0000\u0000\\\u0001" + + "\u0000\u0000\u0000\u0000^\u0001\u0000\u0000\u0000\u0000`\u0001\u0000\u0000" + + "\u0000\u0000b\u0001\u0000\u0000\u0000\u0000d\u0001\u0000\u0000\u0000\u0000" + + "f\u0001\u0000\u0000\u0000\u0000h\u0001\u0000\u0000\u0000\u0000j\u0001" + + "\u0000\u0000\u0000\u0000l\u0001\u0000\u0000\u0000\u0000n\u0001\u0000\u0000" + + "\u0000\u0000p\u0001\u0000\u0000\u0000\u0000r\u0001\u0000\u0000\u0000\u0000" + + "t\u0001\u0000\u0000\u0000\u0000v\u0001\u0000\u0000\u0000\u0000x\u0001" + + "\u0000\u0000\u0000\u0000z\u0001\u0000\u0000\u0000\u0000|\u0001\u0000\u0000" + + "\u0000\u0000~\u0001\u0000\u0000\u0000\u0000\u0080\u0001\u0000\u0000\u0000" + + "\u0000\u0082\u0001\u0000\u0000\u0000\u0000\u0084\u0001\u0000\u0000\u0000" + + "\u0000\u0086\u0001\u0000\u0000\u0000\u0000\u0088\u0001\u0000\u0000\u0000" + + "\u0000\u008a\u0001\u0000\u0000\u0000\u0000\u008c\u0001\u0000\u0000\u0000" + + "\u0000\u008e\u0001\u0000\u0000\u0000\u0000\u0090\u0001\u0000\u0000\u0000" + + "\u0000\u0092\u0001\u0000\u0000\u0000\u0000\u0094\u0001\u0000\u0000\u0000" + + "\u0000\u0096\u0001\u0000\u0000\u0000\u0000\u0098\u0001\u0000\u0000\u0000" + + "\u0000\u009a\u0001\u0000\u0000\u0000\u0000\u009c\u0001\u0000\u0000\u0000" + + "\u0000\u009e\u0001\u0000\u0000\u0000\u0000\u00a0\u0001\u0000\u0000\u0000" + + "\u0000\u00a2\u0001\u0000\u0000\u0000\u0000\u00a4\u0001\u0000\u0000\u0000" + + "\u0000\u00a6\u0001\u0000\u0000\u0000\u0000\u00a8\u0001\u0000\u0000\u0000" + + "\u0001\u00aa\u0001\u0000\u0000\u0000\u0001\u00ac\u0001\u0000\u0000\u0000" + + "\u0002\u00af\u0001\u0000\u0000\u0000\u0004\u00ca\u0001\u0000\u0000\u0000" + + "\u0006\u00ce\u0001\u0000\u0000\u0000\b\u00d0\u0001\u0000\u0000\u0000\n" + + "\u00d2\u0001\u0000\u0000\u0000\f\u00d4\u0001\u0000\u0000\u0000\u000e\u00d6" + + "\u0001\u0000\u0000\u0000\u0010\u00d8\u0001\u0000\u0000\u0000\u0012\u00da" + + "\u0001\u0000\u0000\u0000\u0014\u00de\u0001\u0000\u0000\u0000\u0016\u00e3" + + "\u0001\u0000\u0000\u0000\u0018\u00e5\u0001\u0000\u0000\u0000\u001a\u00e7" + + "\u0001\u0000\u0000\u0000\u001c\u00ea\u0001\u0000\u0000\u0000\u001e\u00ed" + + "\u0001\u0000\u0000\u0000 \u00f2\u0001\u0000\u0000\u0000\"\u00f8\u0001" + + "\u0000\u0000\u0000$\u00fb\u0001\u0000\u0000\u0000&\u00ff\u0001\u0000\u0000" + + "\u0000(\u0108\u0001\u0000\u0000\u0000*\u010e\u0001\u0000\u0000\u0000," + + "\u0115\u0001\u0000\u0000\u0000.\u0119\u0001\u0000\u0000\u00000\u011d\u0001" + + "\u0000\u0000\u00002\u0123\u0001\u0000\u0000\u00004\u0129\u0001\u0000\u0000" + + "\u00006\u012e\u0001\u0000\u0000\u00008\u0139\u0001\u0000\u0000\u0000:" + + "\u013b\u0001\u0000\u0000\u0000<\u013d\u0001\u0000\u0000\u0000>\u013f\u0001" + + "\u0000\u0000\u0000@\u0142\u0001\u0000\u0000\u0000B\u0144\u0001\u0000\u0000" + + 
"\u0000D\u0146\u0001\u0000\u0000\u0000F\u0148\u0001\u0000\u0000\u0000H" + + "\u014b\u0001\u0000\u0000\u0000J\u014e\u0001\u0000\u0000\u0000L\u0152\u0001" + + "\u0000\u0000\u0000N\u0154\u0001\u0000\u0000\u0000P\u0157\u0001\u0000\u0000" + + "\u0000R\u0159\u0001\u0000\u0000\u0000T\u015c\u0001\u0000\u0000\u0000V" + + "\u015f\u0001\u0000\u0000\u0000X\u0163\u0001\u0000\u0000\u0000Z\u0166\u0001" + + "\u0000\u0000\u0000\\\u016a\u0001\u0000\u0000\u0000^\u016c\u0001\u0000" + + "\u0000\u0000`\u016e\u0001\u0000\u0000\u0000b\u0170\u0001\u0000\u0000\u0000" + + "d\u0173\u0001\u0000\u0000\u0000f\u0176\u0001\u0000\u0000\u0000h\u0178" + + "\u0001\u0000\u0000\u0000j\u017a\u0001\u0000\u0000\u0000l\u017d\u0001\u0000" + + "\u0000\u0000n\u0180\u0001\u0000\u0000\u0000p\u0183\u0001\u0000\u0000\u0000" + + "r\u0186\u0001\u0000\u0000\u0000t\u018a\u0001\u0000\u0000\u0000v\u018d" + + "\u0001\u0000\u0000\u0000x\u0190\u0001\u0000\u0000\u0000z\u0192\u0001\u0000" + + "\u0000\u0000|\u0195\u0001\u0000\u0000\u0000~\u0198\u0001\u0000\u0000\u0000" + + "\u0080\u019b\u0001\u0000\u0000\u0000\u0082\u019e\u0001\u0000\u0000\u0000" + + "\u0084\u01a1\u0001\u0000\u0000\u0000\u0086\u01a4\u0001\u0000\u0000\u0000" + + "\u0088\u01a7\u0001\u0000\u0000\u0000\u008a\u01aa\u0001\u0000\u0000\u0000" + + "\u008c\u01ae\u0001\u0000\u0000\u0000\u008e\u01b2\u0001\u0000\u0000\u0000" + + "\u0090\u01b7\u0001\u0000\u0000\u0000\u0092\u01c0\u0001\u0000\u0000\u0000" + + "\u0094\u01d2\u0001\u0000\u0000\u0000\u0096\u01df\u0001\u0000\u0000\u0000" + + "\u0098\u020f\u0001\u0000\u0000\u0000\u009a\u0211\u0001\u0000\u0000\u0000" + + "\u009c\u0222\u0001\u0000\u0000\u0000\u009e\u0227\u0001\u0000\u0000\u0000" + + "\u00a0\u022d\u0001\u0000\u0000\u0000\u00a2\u0232\u0001\u0000\u0000\u0000" + + "\u00a4\u023a\u0001\u0000\u0000\u0000\u00a6\u0245\u0001\u0000\u0000\u0000" + + "\u00a8\u024c\u0001\u0000\u0000\u0000\u00aa\u0258\u0001\u0000\u0000\u0000" + + "\u00ac\u025c\u0001\u0000\u0000\u0000\u00ae\u00b0\u0007\u0000\u0000\u0000" + + "\u00af\u00ae\u0001\u0000\u0000\u0000\u00b0\u00b1\u0001\u0000\u0000\u0000" + + "\u00b1\u00af\u0001\u0000\u0000\u0000\u00b1\u00b2\u0001\u0000\u0000\u0000" + + "\u00b2\u00b3\u0001\u0000\u0000\u0000\u00b3\u00b4\u0006\u0000\u0000\u0000" + + "\u00b4\u0003\u0001\u0000\u0000\u0000\u00b5\u00b6\u0005/\u0000\u0000\u00b6" + + "\u00b7\u0005/\u0000\u0000\u00b7\u00bb\u0001\u0000\u0000\u0000\u00b8\u00ba" + + "\t\u0000\u0000\u0000\u00b9\u00b8\u0001\u0000\u0000\u0000\u00ba\u00bd\u0001" + + "\u0000\u0000\u0000\u00bb\u00bc\u0001\u0000\u0000\u0000\u00bb\u00b9\u0001" + + "\u0000\u0000\u0000\u00bc\u00be\u0001\u0000\u0000\u0000\u00bd\u00bb\u0001" + + "\u0000\u0000\u0000\u00be\u00cb\u0007\u0001\u0000\u0000\u00bf\u00c0\u0005" + + "/\u0000\u0000\u00c0\u00c1\u0005*\u0000\u0000\u00c1\u00c5\u0001\u0000\u0000" + + "\u0000\u00c2\u00c4\t\u0000\u0000\u0000\u00c3\u00c2\u0001\u0000\u0000\u0000" + + "\u00c4\u00c7\u0001\u0000\u0000\u0000\u00c5\u00c6\u0001\u0000\u0000\u0000" + + "\u00c5\u00c3\u0001\u0000\u0000\u0000\u00c6\u00c8\u0001\u0000\u0000\u0000" + + "\u00c7\u00c5\u0001\u0000\u0000\u0000\u00c8\u00c9\u0005*\u0000\u0000\u00c9" + + "\u00cb\u0005/\u0000\u0000\u00ca\u00b5\u0001\u0000\u0000\u0000\u00ca\u00bf" + + "\u0001\u0000\u0000\u0000\u00cb\u00cc\u0001\u0000\u0000\u0000\u00cc\u00cd" + + "\u0006\u0001\u0000\u0000\u00cd\u0005\u0001\u0000\u0000\u0000\u00ce\u00cf" + + "\u0005{\u0000\u0000\u00cf\u0007\u0001\u0000\u0000\u0000\u00d0\u00d1\u0005" + + "}\u0000\u0000\u00d1\t\u0001\u0000\u0000\u0000\u00d2\u00d3\u0005[\u0000" + + 
"\u0000\u00d3\u000b\u0001\u0000\u0000\u0000\u00d4\u00d5\u0005]\u0000\u0000" + + "\u00d5\r\u0001\u0000\u0000\u0000\u00d6\u00d7\u0005(\u0000\u0000\u00d7" + + "\u000f\u0001\u0000\u0000\u0000\u00d8\u00d9\u0005)\u0000\u0000\u00d9\u0011" + + "\u0001\u0000\u0000\u0000\u00da\u00db\u0005.\u0000\u0000\u00db\u00dc\u0001" + + "\u0000\u0000\u0000\u00dc\u00dd\u0006\b\u0001\u0000\u00dd\u0013\u0001\u0000" + + "\u0000\u0000\u00de\u00df\u0005?\u0000\u0000\u00df\u00e0\u0005.\u0000\u0000" + + "\u00e0\u00e1\u0001\u0000\u0000\u0000\u00e1\u00e2\u0006\t\u0001\u0000\u00e2" + + "\u0015\u0001\u0000\u0000\u0000\u00e3\u00e4\u0005,\u0000\u0000\u00e4\u0017" + + "\u0001\u0000\u0000\u0000\u00e5\u00e6\u0005;\u0000\u0000\u00e6\u0019\u0001" + + "\u0000\u0000\u0000\u00e7\u00e8\u0005i\u0000\u0000\u00e8\u00e9\u0005f\u0000" + + "\u0000\u00e9\u001b\u0001\u0000\u0000\u0000\u00ea\u00eb\u0005i\u0000\u0000" + + "\u00eb\u00ec\u0005n\u0000\u0000\u00ec\u001d\u0001\u0000\u0000\u0000\u00ed" + + "\u00ee\u0005e\u0000\u0000\u00ee\u00ef\u0005l\u0000\u0000\u00ef\u00f0\u0005" + + "s\u0000\u0000\u00f0\u00f1\u0005e\u0000\u0000\u00f1\u001f\u0001\u0000\u0000" + + "\u0000\u00f2\u00f3\u0005w\u0000\u0000\u00f3\u00f4\u0005h\u0000\u0000\u00f4" + + "\u00f5\u0005i\u0000\u0000\u00f5\u00f6\u0005l\u0000\u0000\u00f6\u00f7\u0005" + + "e\u0000\u0000\u00f7!\u0001\u0000\u0000\u0000\u00f8\u00f9\u0005d\u0000" + + "\u0000\u00f9\u00fa\u0005o\u0000\u0000\u00fa#\u0001\u0000\u0000\u0000\u00fb" + + "\u00fc\u0005f\u0000\u0000\u00fc\u00fd\u0005o\u0000\u0000\u00fd\u00fe\u0005" + + "r\u0000\u0000\u00fe%\u0001\u0000\u0000\u0000\u00ff\u0100\u0005c\u0000" + + "\u0000\u0100\u0101\u0005o\u0000\u0000\u0101\u0102\u0005n\u0000\u0000\u0102" + + "\u0103\u0005t\u0000\u0000\u0103\u0104\u0005i\u0000\u0000\u0104\u0105\u0005" + + "n\u0000\u0000\u0105\u0106\u0005u\u0000\u0000\u0106\u0107\u0005e\u0000" + + "\u0000\u0107\'\u0001\u0000\u0000\u0000\u0108\u0109\u0005b\u0000\u0000" + + "\u0109\u010a\u0005r\u0000\u0000\u010a\u010b\u0005e\u0000\u0000\u010b\u010c" + + "\u0005a\u0000\u0000\u010c\u010d\u0005k\u0000\u0000\u010d)\u0001\u0000" + + "\u0000\u0000\u010e\u010f\u0005r\u0000\u0000\u010f\u0110\u0005e\u0000\u0000" + + "\u0110\u0111\u0005t\u0000\u0000\u0111\u0112\u0005u\u0000\u0000\u0112\u0113" + + "\u0005r\u0000\u0000\u0113\u0114\u0005n\u0000\u0000\u0114+\u0001\u0000" + + "\u0000\u0000\u0115\u0116\u0005n\u0000\u0000\u0116\u0117\u0005e\u0000\u0000" + + "\u0117\u0118\u0005w\u0000\u0000\u0118-\u0001\u0000\u0000\u0000\u0119\u011a" + + "\u0005t\u0000\u0000\u011a\u011b\u0005r\u0000\u0000\u011b\u011c\u0005y" + + "\u0000\u0000\u011c/\u0001\u0000\u0000\u0000\u011d\u011e\u0005c\u0000\u0000" + + "\u011e\u011f\u0005a\u0000\u0000\u011f\u0120\u0005t\u0000\u0000\u0120\u0121" + + "\u0005c\u0000\u0000\u0121\u0122\u0005h\u0000\u0000\u01221\u0001\u0000" + + "\u0000\u0000\u0123\u0124\u0005t\u0000\u0000\u0124\u0125\u0005h\u0000\u0000" + + "\u0125\u0126\u0005r\u0000\u0000\u0126\u0127\u0005o\u0000\u0000\u0127\u0128" + + "\u0005w\u0000\u0000\u01283\u0001\u0000\u0000\u0000\u0129\u012a\u0005t" + + "\u0000\u0000\u012a\u012b\u0005h\u0000\u0000\u012b\u012c\u0005i\u0000\u0000" + + "\u012c\u012d\u0005s\u0000\u0000\u012d5\u0001\u0000\u0000\u0000\u012e\u012f" + + "\u0005i\u0000\u0000\u012f\u0130\u0005n\u0000\u0000\u0130\u0131\u0005s" + + "\u0000\u0000\u0131\u0132\u0005t\u0000\u0000\u0132\u0133\u0005a\u0000\u0000" + + "\u0133\u0134\u0005n\u0000\u0000\u0134\u0135\u0005c\u0000\u0000\u0135\u0136" + + "\u0005e\u0000\u0000\u0136\u0137\u0005o\u0000\u0000\u0137\u0138\u0005f" + + 
"\u0000\u0000\u01387\u0001\u0000\u0000\u0000\u0139\u013a\u0005!\u0000\u0000" + + "\u013a9\u0001\u0000\u0000\u0000\u013b\u013c\u0005~\u0000\u0000\u013c;" + + "\u0001\u0000\u0000\u0000\u013d\u013e\u0005*\u0000\u0000\u013e=\u0001\u0000" + + "\u0000\u0000\u013f\u0140\u0005/\u0000\u0000\u0140\u0141\u0004\u001e\u0000" + + "\u0000\u0141?\u0001\u0000\u0000\u0000\u0142\u0143\u0005%\u0000\u0000\u0143" + + "A\u0001\u0000\u0000\u0000\u0144\u0145\u0005+\u0000\u0000\u0145C\u0001" + + "\u0000\u0000\u0000\u0146\u0147\u0005-\u0000\u0000\u0147E\u0001\u0000\u0000" + + "\u0000\u0148\u0149\u0005<\u0000\u0000\u0149\u014a\u0005<\u0000\u0000\u014a" + + "G\u0001\u0000\u0000\u0000\u014b\u014c\u0005>\u0000\u0000\u014c\u014d\u0005" + + ">\u0000\u0000\u014dI\u0001\u0000\u0000\u0000\u014e\u014f\u0005>\u0000" + + "\u0000\u014f\u0150\u0005>\u0000\u0000\u0150\u0151\u0005>\u0000\u0000\u0151" + + "K\u0001\u0000\u0000\u0000\u0152\u0153\u0005<\u0000\u0000\u0153M\u0001" + + "\u0000\u0000\u0000\u0154\u0155\u0005<\u0000\u0000\u0155\u0156\u0005=\u0000" + + "\u0000\u0156O\u0001\u0000\u0000\u0000\u0157\u0158\u0005>\u0000\u0000\u0158" + + "Q\u0001\u0000\u0000\u0000\u0159\u015a\u0005>\u0000\u0000\u015a\u015b\u0005" + + "=\u0000\u0000\u015bS\u0001\u0000\u0000\u0000\u015c\u015d\u0005=\u0000" + + "\u0000\u015d\u015e\u0005=\u0000\u0000\u015eU\u0001\u0000\u0000\u0000\u015f" + + "\u0160\u0005=\u0000\u0000\u0160\u0161\u0005=\u0000\u0000\u0161\u0162\u0005" + + "=\u0000\u0000\u0162W\u0001\u0000\u0000\u0000\u0163\u0164\u0005!\u0000" + + "\u0000\u0164\u0165\u0005=\u0000\u0000\u0165Y\u0001\u0000\u0000\u0000\u0166" + + "\u0167\u0005!\u0000\u0000\u0167\u0168\u0005=\u0000\u0000\u0168\u0169\u0005" + + "=\u0000\u0000\u0169[\u0001\u0000\u0000\u0000\u016a\u016b\u0005&\u0000" + + "\u0000\u016b]\u0001\u0000\u0000\u0000\u016c\u016d\u0005^\u0000\u0000\u016d" + + "_\u0001\u0000\u0000\u0000\u016e\u016f\u0005|\u0000\u0000\u016fa\u0001" + + "\u0000\u0000\u0000\u0170\u0171\u0005&\u0000\u0000\u0171\u0172\u0005&\u0000" + + "\u0000\u0172c\u0001\u0000\u0000\u0000\u0173\u0174\u0005|\u0000\u0000\u0174" + + "\u0175\u0005|\u0000\u0000\u0175e\u0001\u0000\u0000\u0000\u0176\u0177\u0005" + + "?\u0000\u0000\u0177g\u0001\u0000\u0000\u0000\u0178\u0179\u0005:\u0000" + + "\u0000\u0179i\u0001\u0000\u0000\u0000\u017a\u017b\u0005?\u0000\u0000\u017b" + + "\u017c\u0005:\u0000\u0000\u017ck\u0001\u0000\u0000\u0000\u017d\u017e\u0005" + + ":\u0000\u0000\u017e\u017f\u0005:\u0000\u0000\u017fm\u0001\u0000\u0000" + + "\u0000\u0180\u0181\u0005-\u0000\u0000\u0181\u0182\u0005>\u0000\u0000\u0182" + + "o\u0001\u0000\u0000\u0000\u0183\u0184\u0005=\u0000\u0000\u0184\u0185\u0005" + + "~\u0000\u0000\u0185q\u0001\u0000\u0000\u0000\u0186\u0187\u0005=\u0000" + + "\u0000\u0187\u0188\u0005=\u0000\u0000\u0188\u0189\u0005~\u0000\u0000\u0189" + + "s\u0001\u0000\u0000\u0000\u018a\u018b\u0005+\u0000\u0000\u018b\u018c\u0005" + + "+\u0000\u0000\u018cu\u0001\u0000\u0000\u0000\u018d\u018e\u0005-\u0000" + + "\u0000\u018e\u018f\u0005-\u0000\u0000\u018fw\u0001\u0000\u0000\u0000\u0190" + + "\u0191\u0005=\u0000\u0000\u0191y\u0001\u0000\u0000\u0000\u0192\u0193\u0005" + + "+\u0000\u0000\u0193\u0194\u0005=\u0000\u0000\u0194{\u0001\u0000\u0000" + + "\u0000\u0195\u0196\u0005-\u0000\u0000\u0196\u0197\u0005=\u0000\u0000\u0197" + + "}\u0001\u0000\u0000\u0000\u0198\u0199\u0005*\u0000\u0000\u0199\u019a\u0005" + + "=\u0000\u0000\u019a\u007f\u0001\u0000\u0000\u0000\u019b\u019c\u0005/\u0000" + + "\u0000\u019c\u019d\u0005=\u0000\u0000\u019d\u0081\u0001\u0000\u0000\u0000" + + 
"\u019e\u019f\u0005%\u0000\u0000\u019f\u01a0\u0005=\u0000\u0000\u01a0\u0083" + + "\u0001\u0000\u0000\u0000\u01a1\u01a2\u0005&\u0000\u0000\u01a2\u01a3\u0005" + + "=\u0000\u0000\u01a3\u0085\u0001\u0000\u0000\u0000\u01a4\u01a5\u0005^\u0000" + + "\u0000\u01a5\u01a6\u0005=\u0000\u0000\u01a6\u0087\u0001\u0000\u0000\u0000" + + "\u01a7\u01a8\u0005|\u0000\u0000\u01a8\u01a9\u0005=\u0000\u0000\u01a9\u0089" + + "\u0001\u0000\u0000\u0000\u01aa\u01ab\u0005<\u0000\u0000\u01ab\u01ac\u0005" + + "<\u0000\u0000\u01ac\u01ad\u0005=\u0000\u0000\u01ad\u008b\u0001\u0000\u0000" + + "\u0000\u01ae\u01af\u0005>\u0000\u0000\u01af\u01b0\u0005>\u0000\u0000\u01b0" + + "\u01b1\u0005=\u0000\u0000\u01b1\u008d\u0001\u0000\u0000\u0000\u01b2\u01b3" + + "\u0005>\u0000\u0000\u01b3\u01b4\u0005>\u0000\u0000\u01b4\u01b5\u0005>" + + "\u0000\u0000\u01b5\u01b6\u0005=\u0000\u0000\u01b6\u008f\u0001\u0000\u0000" + + "\u0000\u01b7\u01b9\u00050\u0000\u0000\u01b8\u01ba\u0007\u0002\u0000\u0000" + + "\u01b9\u01b8\u0001\u0000\u0000\u0000\u01ba\u01bb\u0001\u0000\u0000\u0000" + + "\u01bb\u01b9\u0001\u0000\u0000\u0000\u01bb\u01bc\u0001\u0000\u0000\u0000" + + "\u01bc\u01be\u0001\u0000\u0000\u0000\u01bd\u01bf\u0007\u0003\u0000\u0000" + + "\u01be\u01bd\u0001\u0000\u0000\u0000\u01be\u01bf\u0001\u0000\u0000\u0000" + + "\u01bf\u0091\u0001\u0000\u0000\u0000\u01c0\u01c1\u00050\u0000\u0000\u01c1" + + "\u01c3\u0007\u0004\u0000\u0000\u01c2\u01c4\u0007\u0005\u0000\u0000\u01c3" + + "\u01c2\u0001\u0000\u0000\u0000\u01c4\u01c5\u0001\u0000\u0000\u0000\u01c5" + + "\u01c3\u0001\u0000\u0000\u0000\u01c5\u01c6\u0001\u0000\u0000\u0000\u01c6" + + "\u01c8\u0001\u0000\u0000\u0000\u01c7\u01c9\u0007\u0003\u0000\u0000\u01c8" + + "\u01c7\u0001\u0000\u0000\u0000\u01c8\u01c9\u0001\u0000\u0000\u0000\u01c9" + + "\u0093\u0001\u0000\u0000\u0000\u01ca\u01d3\u00050\u0000\u0000\u01cb\u01cf" + + "\u0007\u0006\u0000\u0000\u01cc\u01ce\u0007\u0007\u0000\u0000\u01cd\u01cc" + + "\u0001\u0000\u0000\u0000\u01ce\u01d1\u0001\u0000\u0000\u0000\u01cf\u01cd" + + "\u0001\u0000\u0000\u0000\u01cf\u01d0\u0001\u0000\u0000\u0000\u01d0\u01d3" + + "\u0001\u0000\u0000\u0000\u01d1\u01cf\u0001\u0000\u0000\u0000\u01d2\u01ca" + + "\u0001\u0000\u0000\u0000\u01d2\u01cb\u0001\u0000\u0000\u0000\u01d3\u01d5" + + "\u0001\u0000\u0000\u0000\u01d4\u01d6\u0007\b\u0000\u0000\u01d5\u01d4\u0001" + + "\u0000\u0000\u0000\u01d5\u01d6\u0001\u0000\u0000\u0000\u01d6\u0095\u0001" + + "\u0000\u0000\u0000\u01d7\u01e0\u00050\u0000\u0000\u01d8\u01dc\u0007\u0006" + + "\u0000\u0000\u01d9\u01db\u0007\u0007\u0000\u0000\u01da\u01d9\u0001\u0000" + + "\u0000\u0000\u01db\u01de\u0001\u0000\u0000\u0000\u01dc\u01da\u0001\u0000" + + "\u0000\u0000\u01dc\u01dd\u0001\u0000\u0000\u0000\u01dd\u01e0\u0001\u0000" + + "\u0000\u0000\u01de\u01dc\u0001\u0000\u0000\u0000\u01df\u01d7\u0001\u0000" + + "\u0000\u0000\u01df\u01d8\u0001\u0000\u0000\u0000\u01e0\u01e7\u0001\u0000" + + "\u0000\u0000\u01e1\u01e3\u0003\u0012\b\u0000\u01e2\u01e4\u0007\u0007\u0000" + + "\u0000\u01e3\u01e2\u0001\u0000\u0000\u0000\u01e4\u01e5\u0001\u0000\u0000" + + "\u0000\u01e5\u01e3\u0001\u0000\u0000\u0000\u01e5\u01e6\u0001\u0000\u0000" + + "\u0000\u01e6\u01e8\u0001\u0000\u0000\u0000\u01e7\u01e1\u0001\u0000\u0000" + + "\u0000\u01e7\u01e8\u0001\u0000\u0000\u0000\u01e8\u01f2\u0001\u0000\u0000" + + "\u0000\u01e9\u01eb\u0007\t\u0000\u0000\u01ea\u01ec\u0007\n\u0000\u0000" + + "\u01eb\u01ea\u0001\u0000\u0000\u0000\u01eb\u01ec\u0001\u0000\u0000\u0000" + + "\u01ec\u01ee\u0001\u0000\u0000\u0000\u01ed\u01ef\u0007\u0007\u0000\u0000" + + 
"\u01ee\u01ed\u0001\u0000\u0000\u0000\u01ef\u01f0\u0001\u0000\u0000\u0000" + + "\u01f0\u01ee\u0001\u0000\u0000\u0000\u01f0\u01f1\u0001\u0000\u0000\u0000" + + "\u01f1\u01f3\u0001\u0000\u0000\u0000\u01f2\u01e9\u0001\u0000\u0000\u0000" + + "\u01f2\u01f3\u0001\u0000\u0000\u0000\u01f3\u01f5\u0001\u0000\u0000\u0000" + + "\u01f4\u01f6\u0007\u000b\u0000\u0000\u01f5\u01f4\u0001\u0000\u0000\u0000" + + "\u01f5\u01f6\u0001\u0000\u0000\u0000\u01f6\u0097\u0001\u0000\u0000\u0000" + + "\u01f7\u01ff\u0005\"\u0000\u0000\u01f8\u01f9\u0005\\\u0000\u0000\u01f9" + + "\u01fe\u0005\"\u0000\u0000\u01fa\u01fb\u0005\\\u0000\u0000\u01fb\u01fe" + + "\u0005\\\u0000\u0000\u01fc\u01fe\b\f\u0000\u0000\u01fd\u01f8\u0001\u0000" + + "\u0000\u0000\u01fd\u01fa\u0001\u0000\u0000\u0000\u01fd\u01fc\u0001\u0000" + + "\u0000\u0000\u01fe\u0201\u0001\u0000\u0000\u0000\u01ff\u0200\u0001\u0000" + + "\u0000\u0000\u01ff\u01fd\u0001\u0000\u0000\u0000\u0200\u0202\u0001\u0000" + + "\u0000\u0000\u0201\u01ff\u0001\u0000\u0000\u0000\u0202\u0210\u0005\"\u0000" + + "\u0000\u0203\u020b\u0005\'\u0000\u0000\u0204\u0205\u0005\\\u0000\u0000" + + "\u0205\u020a\u0005\'\u0000\u0000\u0206\u0207\u0005\\\u0000\u0000\u0207" + + "\u020a\u0005\\\u0000\u0000\u0208\u020a\b\r\u0000\u0000\u0209\u0204\u0001" + + "\u0000\u0000\u0000\u0209\u0206\u0001\u0000\u0000\u0000\u0209\u0208\u0001" + + "\u0000\u0000\u0000\u020a\u020d\u0001\u0000\u0000\u0000\u020b\u020c\u0001" + + "\u0000\u0000\u0000\u020b\u0209\u0001\u0000\u0000\u0000\u020c\u020e\u0001" + + "\u0000\u0000\u0000\u020d\u020b\u0001\u0000\u0000\u0000\u020e\u0210\u0005" + + "\'\u0000\u0000\u020f\u01f7\u0001\u0000\u0000\u0000\u020f\u0203\u0001\u0000" + + "\u0000\u0000\u0210\u0099\u0001\u0000\u0000\u0000\u0211\u0215\u0005/\u0000" + + "\u0000\u0212\u0213\u0005\\\u0000\u0000\u0213\u0216\b\u000e\u0000\u0000" + + "\u0214\u0216\b\u000f\u0000\u0000\u0215\u0212\u0001\u0000\u0000\u0000\u0215" + + "\u0214\u0001\u0000\u0000\u0000\u0216\u0217\u0001\u0000\u0000\u0000\u0217" + + "\u0218\u0001\u0000\u0000\u0000\u0217\u0215\u0001\u0000\u0000\u0000\u0218" + + "\u0219\u0001\u0000\u0000\u0000\u0219\u021d\u0005/\u0000\u0000\u021a\u021c" + + "\u0007\u0010\u0000\u0000\u021b\u021a\u0001\u0000\u0000\u0000\u021c\u021f" + + "\u0001\u0000\u0000\u0000\u021d\u021b\u0001\u0000\u0000\u0000\u021d\u021e" + + "\u0001\u0000\u0000\u0000\u021e\u0220\u0001\u0000\u0000\u0000\u021f\u021d" + + "\u0001\u0000\u0000\u0000\u0220\u0221\u0004L\u0001\u0000\u0221\u009b\u0001" + + "\u0000\u0000\u0000\u0222\u0223\u0005t\u0000\u0000\u0223\u0224\u0005r\u0000" + + "\u0000\u0224\u0225\u0005u\u0000\u0000\u0225\u0226\u0005e\u0000\u0000\u0226" + + "\u009d\u0001\u0000\u0000\u0000\u0227\u0228\u0005f\u0000\u0000\u0228\u0229" + + "\u0005a\u0000\u0000\u0229\u022a\u0005l\u0000\u0000\u022a\u022b\u0005s" + + "\u0000\u0000\u022b\u022c\u0005e\u0000\u0000\u022c\u009f\u0001\u0000\u0000" + + "\u0000\u022d\u022e\u0005n\u0000\u0000\u022e\u022f\u0005u\u0000\u0000\u022f" + + "\u0230\u0005l\u0000\u0000\u0230\u0231\u0005l\u0000\u0000\u0231\u00a1\u0001" + + "\u0000\u0000\u0000\u0232\u0236\u0003\u00a4Q\u0000\u0233\u0234\u0003\n" + + "\u0004\u0000\u0234\u0235\u0003\f\u0005\u0000\u0235\u0237\u0001\u0000\u0000" + + "\u0000\u0236\u0233\u0001\u0000\u0000\u0000\u0237\u0238\u0001\u0000\u0000" + + "\u0000\u0238\u0236\u0001\u0000\u0000\u0000\u0238\u0239\u0001\u0000\u0000" + + "\u0000\u0239\u00a3\u0001\u0000\u0000\u0000\u023a\u0240\u0003\u00a6R\u0000" + + "\u023b\u023c\u0003\u0012\b\u0000\u023c\u023d\u0003\u00a6R\u0000\u023d" + + "\u023f\u0001\u0000\u0000\u0000\u023e\u023b\u0001\u0000\u0000\u0000\u023f" 
+ + "\u0242\u0001\u0000\u0000\u0000\u0240\u023e\u0001\u0000\u0000\u0000\u0240" + + "\u0241\u0001\u0000\u0000\u0000\u0241\u0243\u0001\u0000\u0000\u0000\u0242" + + "\u0240\u0001\u0000\u0000\u0000\u0243\u0244\u0004Q\u0002\u0000\u0244\u00a5" + + "\u0001\u0000\u0000\u0000\u0245\u0249\u0007\u0011\u0000\u0000\u0246\u0248" + + "\u0007\u0012\u0000\u0000\u0247\u0246\u0001\u0000\u0000\u0000\u0248\u024b" + + "\u0001\u0000\u0000\u0000\u0249\u0247\u0001\u0000\u0000\u0000\u0249\u024a" + + "\u0001\u0000\u0000\u0000\u024a\u00a7\u0001\u0000\u0000\u0000\u024b\u0249" + + "\u0001\u0000\u0000\u0000\u024c\u024d\t\u0000\u0000\u0000\u024d\u024e\u0001" + + "\u0000\u0000\u0000\u024e\u024f\u0006S\u0000\u0000\u024f\u00a9\u0001\u0000" + + "\u0000\u0000\u0250\u0259\u00050\u0000\u0000\u0251\u0255\u0007\u0006\u0000" + + "\u0000\u0252\u0254\u0007\u0007\u0000\u0000\u0253\u0252\u0001\u0000\u0000" + + "\u0000\u0254\u0257\u0001\u0000\u0000\u0000\u0255\u0253\u0001\u0000\u0000" + + "\u0000\u0255\u0256\u0001\u0000\u0000\u0000\u0256\u0259\u0001\u0000\u0000" + + "\u0000\u0257\u0255\u0001\u0000\u0000\u0000\u0258\u0250\u0001\u0000\u0000" + + "\u0000\u0258\u0251\u0001\u0000\u0000\u0000\u0259\u025a\u0001\u0000\u0000" + + "\u0000\u025a\u025b\u0006T\u0002\u0000\u025b\u00ab\u0001\u0000\u0000\u0000" + + "\u025c\u0260\u0007\u0011\u0000\u0000\u025d\u025f\u0007\u0012\u0000\u0000" + + "\u025e\u025d\u0001\u0000\u0000\u0000\u025f\u0262\u0001\u0000\u0000\u0000" + + "\u0260\u025e\u0001\u0000\u0000\u0000\u0260\u0261\u0001\u0000\u0000\u0000" + + "\u0261\u0263\u0001\u0000\u0000\u0000\u0262\u0260\u0001\u0000\u0000\u0000" + + "\u0263\u0264\u0006U\u0002\u0000\u0264\u00ad\u0001\u0000\u0000\u0000#\u0000" + + "\u0001\u00b1\u00bb\u00c5\u00ca\u01bb\u01be\u01c5\u01c8\u01cf\u01d2\u01d5" + + "\u01dc\u01df\u01e5\u01e7\u01eb\u01f0\u01f2\u01f5\u01fd\u01ff\u0209\u020b" + + "\u020f\u0215\u0217\u021d\u0238\u0240\u0249\u0255\u0258\u0260\u0003\u0006" + + "\u0000\u0000\u0002\u0001\u0000\u0002\u0000\u0000"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { _decisionToDFA = new DFA[_ATN.getNumberOfDecisions()]; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/ValueIterator.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/ValueIterator.java new file mode 100644 index 000000000000..7f366bc3284a --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/ValueIterator.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.painless.api; + +import java.util.Iterator; + +/** + * An {@link Iterator} that can return primitive values + */ +public interface ValueIterator extends Iterator { + boolean nextBoolean(); + + byte nextByte(); + + short nextShort(); + + char nextChar(); + + int nextInt(); + + long nextLong(); + + float nextFloat(); + + double nextDouble(); +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java index 2d84c9d2baab..db380f69c7c0 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.painless.lookup; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.core.Strings; import org.elasticsearch.painless.Def; import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.painless.spi.WhitelistClass; @@ -260,6 +261,14 @@ public void addPainlessClass(ClassLoader classLoader, String javaClassName, Map< addPainlessClass(clazz, annotations); } + private static IllegalArgumentException lookupException(String formatText, Object... args) { + return new IllegalArgumentException(Strings.format(formatText, args)); + } + + private static IllegalArgumentException lookupException(Throwable cause, String formatText, Object... args) { + return new IllegalArgumentException(Strings.format(formatText, args), cause); + } + public void addPainlessClass(Class clazz, Map, Object> annotations) { Objects.requireNonNull(clazz); Objects.requireNonNull(annotations); @@ -283,22 +292,18 @@ public void addPainlessClass(Class clazz, Map, Object> annotations) if (existingClass == null) { javaClassNamesToClasses.put(clazz.getName().intern(), clazz); } else if (existingClass != clazz) { - throw new IllegalArgumentException( - "class [" - + canonicalClassName - + "] " - + "cannot represent multiple java classes with the same name from different class loaders" + throw lookupException( + "class [%s] cannot represent multiple java classes with the same name from different class loaders", + canonicalClassName ); } existingClass = canonicalClassNamesToClasses.get(canonicalClassName); if (existingClass != null && existingClass != clazz) { - throw new IllegalArgumentException( - "class [" - + canonicalClassName - + "] " - + "cannot represent multiple java classes with the same name from different class loaders" + throw lookupException( + "class [%s] cannot represent multiple java classes with the same name from different class loaders", + canonicalClassName ); } @@ -333,22 +338,16 @@ public void addPainlessClass(Class clazz, Map, Object> annotations) if (annotations.get(AliasAnnotation.class)instanceof AliasAnnotation alias) { Class existing = canonicalClassNamesToClasses.put(alias.alias(), clazz); if (existing != null) { - throw new IllegalArgumentException( - "Cannot add alias [" + alias.alias() + "] for [" + clazz + "] that shadows class [" + existing + "]" - ); + throw lookupException("Cannot add alias [%s] for [%s] that shadows class [%s]", alias.alias(), clazz, existing); } } } } else if (importedClass != clazz) { - throw new IllegalArgumentException( - "imported class [" - + importedCanonicalClassName - + "] cannot represent multiple " - + "classes [" - + canonicalClassName - + "] and [" - + 
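The new ValueIterator interface above gives the Painless runtime a way to pull primitive values out of an iterator without boxing every element. As a point of reference only, here is a minimal, self-contained sketch of what an int[]-backed implementation could look like; SketchValueIterator simply mirrors the interface added in this change, and IntArrayValueIterator is an invented name used purely for illustration, not code from this PR.

import java.util.Iterator;
import java.util.NoSuchElementException;

// Illustrative only: a ValueIterator-style iterator backed by an int[].
interface SketchValueIterator extends Iterator<Object> {
    boolean nextBoolean();
    byte nextByte();
    short nextShort();
    char nextChar();
    int nextInt();
    long nextLong();
    float nextFloat();
    double nextDouble();
}

final class IntArrayValueIterator implements SketchValueIterator {
    private final int[] values;
    private int index;

    IntArrayValueIterator(int[] values) {
        this.values = values;
    }

    @Override
    public boolean hasNext() {
        return index < values.length;
    }

    // Boxing path, used when the loop variable is a reference type.
    @Override
    public Object next() {
        return nextInt();
    }

    // Unboxed path, used when the loop variable is a primitive int.
    @Override
    public int nextInt() {
        if (hasNext() == false) {
            throw new NoSuchElementException();
        }
        return values[index++];
    }

    // The remaining accessors convert from the backing int values.
    @Override public boolean nextBoolean() { return nextInt() != 0; }
    @Override public byte nextByte() { return (byte) nextInt(); }
    @Override public short nextShort() { return (short) nextInt(); }
    @Override public char nextChar() { return (char) nextInt(); }
    @Override public long nextLong() { return nextInt(); }
    @Override public float nextFloat() { return nextInt(); }
    @Override public double nextDouble() { return nextInt(); }
}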
typeToCanonicalTypeName(importedClass) - + "]" + throw lookupException( + "imported class [%s] cannot represent multiple classes [%s] and [%s]", + importedCanonicalClassName, + canonicalClassName, + typeToCanonicalTypeName(importedClass) ); } else if (importClassName == false) { throw new IllegalArgumentException("inconsistent no_import parameter found for class [" + canonicalClassName + "]"); @@ -367,15 +366,11 @@ public void addPainlessConstructor( Class targetClass = canonicalClassNamesToClasses.get(targetCanonicalClassName); if (targetClass == null) { - throw new IllegalArgumentException( - "target class [" - + targetCanonicalClassName - + "] not found" - + "for constructor [[" - + targetCanonicalClassName - + "], " - + canonicalTypeNameParameters - + "]" + throw lookupException( + "target class [%s] not found for constructor [[%s], %s]", + targetCanonicalClassName, + targetCanonicalClassName, + canonicalTypeNameParameters ); } @@ -385,15 +380,11 @@ public void addPainlessConstructor( Class typeParameter = canonicalTypeNameToType(canonicalTypeNameParameter); if (typeParameter == null) { - throw new IllegalArgumentException( - "type parameter [" - + canonicalTypeNameParameter - + "] not found " - + "for constructor [[" - + targetCanonicalClassName - + "], " - + canonicalTypeNameParameters - + "]" + throw lookupException( + "type parameter [%s] not found for constructor [[%s], %s]", + canonicalTypeNameParameter, + targetCanonicalClassName, + canonicalTypeNameParameters ); } @@ -415,15 +406,11 @@ public void addPainlessConstructor(Class targetClass, List> typePara PainlessClassBuilder painlessClassBuilder = classesToPainlessClassBuilders.get(targetClass); if (painlessClassBuilder == null) { - throw new IllegalArgumentException( - "target class [" - + targetCanonicalClassName - + "] not found" - + "for constructor [[" - + targetCanonicalClassName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]" + throw lookupException( + "target class [%s] not found for constructor [[%s], %s]", + targetCanonicalClassName, + targetCanonicalClassName, + typesToCanonicalTypeNames(typeParameters) ); } @@ -432,15 +419,11 @@ public void addPainlessConstructor(Class targetClass, List> typePara for (Class typeParameter : typeParameters) { if (isValidType(typeParameter) == false) { - throw new IllegalArgumentException( - "type parameter [" - + typeToCanonicalTypeName(typeParameter) - + "] not found " - + "for constructor [[" - + targetCanonicalClassName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]" + throw lookupException( + "type parameter [%s] not found for constructor [[%s], %s]", + typeToCanonicalTypeName(typeParameter), + targetCanonicalClassName, + typesToCanonicalTypeNames(typeParameters) ); } @@ -452,14 +435,11 @@ public void addPainlessConstructor(Class targetClass, List> typePara try { javaConstructor = targetClass.getConstructor(javaTypeParameters.toArray(Class[]::new)); } catch (NoSuchMethodException nsme) { - throw new IllegalArgumentException( - "reflection object not found for constructor " - + "[[" - + targetCanonicalClassName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]", - nsme + throw lookupException( + nsme, + "reflection object not found for constructor [[%s], %s]", + targetCanonicalClassName, + typesToCanonicalTypeNames(typeParameters) ); } @@ -468,14 +448,11 @@ public void addPainlessConstructor(Class targetClass, List> typePara try { methodHandle = lookup(targetClass).unreflectConstructor(javaConstructor); } catch (IllegalAccessException 
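The changes to PainlessLookupBuilder replace long chains of string concatenation at each throw site with a small lookupException factory built on Strings.format. Assuming Strings.format behaves like String.format with a fixed root locale (an assumption of this sketch, not something the diff states), a standalone version of the pattern looks like the following; the LookupErrors class name and the example message are illustrative only.

import java.util.Locale;

// Sketch of the error-factory pattern: one printf-style template per message
// instead of concatenating many string fragments at the throw site.
final class LookupErrors {

    private LookupErrors() {}

    static IllegalArgumentException lookupException(String formatText, Object... args) {
        return new IllegalArgumentException(String.format(Locale.ROOT, formatText, args));
    }

    static IllegalArgumentException lookupException(Throwable cause, String formatText, Object... args) {
        return new IllegalArgumentException(String.format(Locale.ROOT, formatText, args), cause);
    }

    public static void main(String[] args) {
        IllegalArgumentException iae = lookupException(
            "target class [%s] not found for constructor [[%s], %s]",
            "Example",
            "Example",
            "[int, java.lang.String]"
        );
        System.out.println(iae.getMessage());
    }
}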
iae) { - throw new IllegalArgumentException( - "method handle not found for constructor " - + "[[" - + targetCanonicalClassName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]", - iae + throw lookupException( + iae, + "method handle not found for constructor [[%s], %s]", + targetCanonicalClassName, + typesToCanonicalTypeNames(typeParameters) ); } @@ -499,18 +476,12 @@ public void addPainlessConstructor(Class targetClass, List> typePara newPainlessConstructor = painlessConstructorCache.computeIfAbsent(newPainlessConstructor, Function.identity()); painlessClassBuilder.constructors.put(painlessConstructorKey.intern(), newPainlessConstructor); } else if (newPainlessConstructor.equals(existingPainlessConstructor) == false) { - throw new IllegalArgumentException( - "cannot add constructors with the same arity but are not equivalent for constructors " - + "[[" - + targetCanonicalClassName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "] and " - + "[[" - + targetCanonicalClassName - + "], " - + typesToCanonicalTypeNames(existingPainlessConstructor.typeParameters()) - + "]" + throw lookupException( + "cannot add constructors with the same arity but are not equivalent for constructors [[%s], %s] and [[%s], %s]", + targetCanonicalClassName, + typesToCanonicalTypeNames(typeParameters), + targetCanonicalClassName, + typesToCanonicalTypeNames(existingPainlessConstructor.typeParameters()) ); } } @@ -535,17 +506,12 @@ public void addPainlessMethod( Class targetClass = canonicalClassNamesToClasses.get(targetCanonicalClassName); if (targetClass == null) { - throw new IllegalArgumentException( - "target class [" - + targetCanonicalClassName - + "] not found for method " - + "[[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + canonicalTypeNameParameters - + "]" + throw lookupException( + "target class [%s] not found for method [[%s], [%s], %s]", + targetCanonicalClassName, + targetCanonicalClassName, + methodName, + canonicalTypeNameParameters ); } @@ -555,16 +521,13 @@ public void addPainlessMethod( augmentedClass = loadClass( classLoader, augmentedCanonicalClassName, - () -> "augmented class [" - + augmentedCanonicalClassName - + "] not found for method " - + "[[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + canonicalTypeNameParameters - + "]" + () -> Strings.format( + "augmented class [%s] not found for method [[%s], [%s], %s]", + augmentedCanonicalClassName, + targetCanonicalClassName, + methodName, + canonicalTypeNameParameters + ) ); } @@ -574,17 +537,12 @@ public void addPainlessMethod( Class typeParameter = canonicalTypeNameToType(canonicalTypeNameParameter); if (typeParameter == null) { - throw new IllegalArgumentException( - "type parameter [" - + canonicalTypeNameParameter - + "] not found for method " - + "[[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + canonicalTypeNameParameters - + "]" + throw lookupException( + "type parameter [%s] not found for method [[%s], [%s], %s]", + canonicalTypeNameParameter, + targetCanonicalClassName, + methodName, + canonicalTypeNameParameters ); } @@ -594,17 +552,12 @@ public void addPainlessMethod( Class returnType = canonicalTypeNameToType(returnCanonicalTypeName); if (returnType == null) { - throw new IllegalArgumentException( - "return type [" - + returnCanonicalTypeName - + "] not found for method " - + "[[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + canonicalTypeNameParameters - + "]" + throw lookupException( + "return type [%s] not 
found for method [[%s], [%s], %s]", + returnCanonicalTypeName, + targetCanonicalClassName, + methodName, + canonicalTypeNameParameters ); } @@ -641,17 +594,12 @@ public void addPainlessMethod( PainlessClassBuilder painlessClassBuilder = classesToPainlessClassBuilders.get(targetClass); if (painlessClassBuilder == null) { - throw new IllegalArgumentException( - "target class [" - + targetCanonicalClassName - + "] not found for method " - + "[[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]" + throw lookupException( + "target class [%s] not found for method [[%s], [%s], %s]", + targetCanonicalClassName, + targetCanonicalClassName, + methodName, + typesToCanonicalTypeNames(typeParameters) ); } @@ -665,17 +613,12 @@ public void addPainlessMethod( for (Class typeParameter : typeParameters) { if (isValidType(typeParameter) == false) { - throw new IllegalArgumentException( - "type parameter [" - + typeToCanonicalTypeName(typeParameter) - + "] " - + "not found for method [[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]" + throw lookupException( + "type parameter [%s] not found for method [[%s], [%s], %s]", + typeToCanonicalTypeName(typeParameter), + targetCanonicalClassName, + methodName, + typesToCanonicalTypeNames(typeParameters) ); } @@ -683,17 +626,12 @@ public void addPainlessMethod( } if (isValidType(returnType) == false) { - throw new IllegalArgumentException( - "return type [" - + typeToCanonicalTypeName(returnType) - + "] not found for method " - + "[[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]" + throw lookupException( + "return type [%s] not found for method [[%s], [%s], %s]", + typeToCanonicalTypeName(returnType), + targetCanonicalClassName, + methodName, + typesToCanonicalTypeNames(typeParameters) ); } @@ -703,16 +641,12 @@ public void addPainlessMethod( try { javaMethod = targetClass.getMethod(methodName, javaTypeParameters.toArray(Class[]::new)); } catch (NoSuchMethodException nsme) { - throw new IllegalArgumentException( - "reflection object not found for method [[" - + targetCanonicalClassName - + "], " - + "[" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]", - nsme + throw lookupException( + nsme, + "reflection object not found for method [[%s], [%s], %s]", + targetCanonicalClassName, + methodName, + typesToCanonicalTypeNames(typeParameters) ); } } else { @@ -720,33 +654,22 @@ public void addPainlessMethod( javaMethod = augmentedClass.getMethod(methodName, javaTypeParameters.toArray(Class[]::new)); if (Modifier.isStatic(javaMethod.getModifiers()) == false) { - throw new IllegalArgumentException( - "method [[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "] with augmented class " - + "[" - + typeToCanonicalTypeName(augmentedClass) - + "] must be static" + throw lookupException( + "method [[%s], [%s], %s] with augmented class [%s] must be static", + targetCanonicalClassName, + methodName, + typesToCanonicalTypeNames(typeParameters), + typeToCanonicalTypeName(augmentedClass) ); } } catch (NoSuchMethodException nsme) { - throw new IllegalArgumentException( - "reflection object not found for method " - + "[[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "] " - + "with augmented class [" - + 
typeToCanonicalTypeName(augmentedClass) - + "]", - nsme + throw lookupException( + nsme, + "reflection object not found for method [[%s], [%s], %s] with augmented class [%s]", + targetCanonicalClassName, + methodName, + typesToCanonicalTypeNames(typeParameters), + typeToCanonicalTypeName(augmentedClass) ); } } @@ -764,20 +687,13 @@ public void addPainlessMethod( } if (javaMethod.getReturnType() != typeToJavaType(returnType)) { - throw new IllegalArgumentException( - "return type [" - + typeToCanonicalTypeName(javaMethod.getReturnType()) - + "] " - + "does not match the specified returned type [" - + typeToCanonicalTypeName(returnType) - + "] " - + "for method [[" - + targetClass.getCanonicalName() - + "], [" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]" + throw lookupException( + "return type [%s] does not match the specified returned type [%s] for method [[%s], [%s], %s]", + typeToCanonicalTypeName(javaMethod.getReturnType()), + typeToCanonicalTypeName(returnType), + targetClass.getCanonicalName(), + methodName, + typesToCanonicalTypeNames(typeParameters) ); } @@ -787,38 +703,26 @@ public void addPainlessMethod( try { methodHandle = lookup(targetClass).unreflect(javaMethod); } catch (IllegalAccessException iae) { - throw new IllegalArgumentException( - "method handle not found for method " - + "[[" - + targetClass.getCanonicalName() - + "], [" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "], " - + "with lookup [" - + lookup(targetClass) - + "]", - iae + throw lookupException( + iae, + "method handle not found for method [[%s], [%s], %s], with lookup [%s]", + targetClass.getCanonicalName(), + methodName, + typesToCanonicalTypeNames(typeParameters), + lookup(targetClass) ); } } else { try { methodHandle = lookup(augmentedClass).unreflect(javaMethod); } catch (IllegalAccessException iae) { - throw new IllegalArgumentException( - "method handle not found for method " - + "[[" - + targetClass.getCanonicalName() - + "], [" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]" - + "with augmented class [" - + typeToCanonicalTypeName(augmentedClass) - + "]", - iae + throw lookupException( + iae, + "method handle not found for method [[%s], [%s], %s] with augmented class [%s]", + targetClass.getCanonicalName(), + methodName, + typesToCanonicalTypeNames(typeParameters), + typeToCanonicalTypeName(augmentedClass) ); } } @@ -852,28 +756,17 @@ public void addPainlessMethod( painlessClassBuilder.methods.put(painlessMethodKey.intern(), newPainlessMethod); } } else if (newPainlessMethod.equals(existingPainlessMethod) == false) { - throw new IllegalArgumentException( + throw lookupException( "cannot add methods with the same name and arity but are not equivalent for methods " - + "[[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + "[" - + typeToCanonicalTypeName(returnType) - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "] and " - + "[[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + "[" - + typeToCanonicalTypeName(existingPainlessMethod.returnType()) - + "], " - + typesToCanonicalTypeNames(existingPainlessMethod.typeParameters()) - + "]" + + "[[%s], [%s], [%s], %s] and [[%s], [%s], [%s], %s]", + targetCanonicalClassName, + methodName, + typeToCanonicalTypeName(returnType), + typesToCanonicalTypeNames(typeParameters), + targetCanonicalClassName, + methodName, + typeToCanonicalTypeName(existingPainlessMethod.returnType()), + 
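The static-modifier checks above apply to augmented methods and fields, which live on a separate augmentation class rather than on the whitelisted target class. In the Painless whitelist model as assumed here, an augmented instance method is a static method whose first parameter is the receiver being augmented; the sketch below illustrates that shape with invented names and is not code from this change.

// Invented example of the augmentation shape the static check above enforces.
final class StringAugmentationSketch {

    // Hypothetically exposed to scripts as someString.firstChar().
    public static char firstChar(String receiver) {
        return receiver.charAt(0);
    }

    public static void main(String[] args) {
        System.out.println(firstChar("painless")); // prints p
    }
}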
typesToCanonicalTypeNames(existingPainlessMethod.typeParameters()) ); } } @@ -895,17 +788,12 @@ public void addPainlessField( Class targetClass = canonicalClassNamesToClasses.get(targetCanonicalClassName); if (targetClass == null) { - throw new IllegalArgumentException( - "target class [" - + targetCanonicalClassName - + "] not found for field " - + "[[" - + targetCanonicalClassName - + "], [" - + fieldName - + "], [" - + canonicalTypeNameParameter - + "]]" + throw lookupException( + "target class [%s] not found for field [[%s], [%s], [%s]]", + targetCanonicalClassName, + targetCanonicalClassName, + fieldName, + canonicalTypeNameParameter ); } @@ -919,29 +807,23 @@ public void addPainlessField( augmentedClass = loadClass( classLoader, augmentedCanonicalClassName, - () -> "augmented class [" - + augmentedCanonicalClassName - + "] not found for field " - + "[[" - + targetCanonicalClassName - + "], [" - + fieldName - + "]" + () -> Strings.format( + "augmented class [%s] not found for field [[%s], [%s]]", + augmentedCanonicalClassName, + targetCanonicalClassName, + fieldName + ) ); } Class typeParameter = canonicalTypeNameToType(canonicalTypeNameParameter); if (typeParameter == null) { - throw new IllegalArgumentException( - "type parameter [" - + canonicalTypeNameParameter - + "] not found " - + "for field [[" - + targetCanonicalClassName - + "], [" - + fieldName - + "]" + throw lookupException( + "type parameter [%s] not found for field [[%s], [%s]]", + canonicalTypeNameParameter, + targetCanonicalClassName, + fieldName ); } @@ -976,32 +858,22 @@ public void addPainlessField( PainlessClassBuilder painlessClassBuilder = classesToPainlessClassBuilders.get(targetClass); if (painlessClassBuilder == null) { - throw new IllegalArgumentException( - "target class [" - + targetCanonicalClassName - + "] not found for field " - + "[[" - + targetCanonicalClassName - + "], [" - + fieldName - + "], [" - + typeToCanonicalTypeName(typeParameter) - + "]]" + throw lookupException( + "target class [%s] not found for field [[%s], [%s], [%s]]", + targetCanonicalClassName, + targetCanonicalClassName, + fieldName, + typeToCanonicalTypeName(typeParameter) ); } if (isValidType(typeParameter) == false) { - throw new IllegalArgumentException( - "type parameter [" - + typeToCanonicalTypeName(typeParameter) - + "] not found for field " - + "[[" - + targetCanonicalClassName - + "], [" - + fieldName - + "], [" - + typeToCanonicalTypeName(typeParameter) - + "]]" + throw lookupException( + "type parameter [%s] not found for field [[%s], [%s], [%s]]", + typeToCanonicalTypeName(typeParameter), + targetCanonicalClassName, + fieldName, + typeToCanonicalTypeName(typeParameter) ); } @@ -1011,16 +883,12 @@ public void addPainlessField( try { javaField = targetClass.getField(fieldName); } catch (NoSuchFieldException nsfe) { - throw new IllegalArgumentException( - "reflection object not found for field " - + "[[" - + targetCanonicalClassName - + "], [" - + fieldName - + "], [" - + typeToCanonicalTypeName(typeParameter) - + "]]", - nsfe + throw lookupException( + nsfe, + "reflection object not found for field [[%s], [%s], [%s]]", + targetCanonicalClassName, + fieldName, + typeToCanonicalTypeName(typeParameter) ); } } else { @@ -1028,48 +896,32 @@ public void addPainlessField( javaField = augmentedClass.getField(fieldName); if (Modifier.isStatic(javaField.getModifiers()) == false || Modifier.isFinal(javaField.getModifiers()) == false) { - throw new IllegalArgumentException( - "field [[" - + targetCanonicalClassName - + "], [" - + 
fieldName - + "] " - + "with augmented class [" - + typeToCanonicalTypeName(augmentedClass) - + "] must be static and final" + throw lookupException( + "field [[%s], [%s]] with augmented class [%s] must be static and final", + targetCanonicalClassName, + fieldName, + typeToCanonicalTypeName(augmentedClass) ); } } catch (NoSuchFieldException nsfe) { - throw new IllegalArgumentException( - "reflection object not found for field " - + "[[" - + targetCanonicalClassName - + "], [" - + fieldName - + "], [" - + typeToCanonicalTypeName(typeParameter) - + "]]" - + "with augmented class [" - + typeToCanonicalTypeName(augmentedClass) - + "]", - nsfe + throw lookupException( + nsfe, + "reflection object not found for field [[%s], [%s], [%s]] with augmented class [%s]", + targetCanonicalClassName, + fieldName, + typeToCanonicalTypeName(typeParameter), + typeToCanonicalTypeName(augmentedClass) ); } } if (javaField.getType() != typeToJavaType(typeParameter)) { - throw new IllegalArgumentException( - "type parameter [" - + typeToCanonicalTypeName(javaField.getType()) - + "] " - + "does not match the specified type parameter [" - + typeToCanonicalTypeName(typeParameter) - + "] " - + "for field [[" - + targetCanonicalClassName - + "], [" - + fieldName - + "]" + throw lookupException( + "type parameter [%s] does not match the specified type parameter [%s] for field [[%s], [%s]]", + typeToCanonicalTypeName(javaField.getType()), + typeToCanonicalTypeName(typeParameter), + targetCanonicalClassName, + fieldName ); } @@ -1094,26 +946,18 @@ public void addPainlessField( PainlessField newPainlessField = new PainlessField(javaField, typeParameter, annotations, methodHandleGetter, null); if (existingPainlessField == null) { - newPainlessField = painlessFieldCache.computeIfAbsent(newPainlessField, key -> key); + newPainlessField = painlessFieldCache.computeIfAbsent(newPainlessField, Function.identity()); painlessClassBuilder.staticFields.put(painlessFieldKey.intern(), newPainlessField); } else if (newPainlessField.equals(existingPainlessField) == false) { - throw new IllegalArgumentException( - "cannot add fields with the same name but are not equivalent for fields " - + "[[" - + targetCanonicalClassName - + "], [" - + fieldName - + "], [" - + typeToCanonicalTypeName(typeParameter) - + "] and " - + "[[" - + targetCanonicalClassName - + "], [" - + existingPainlessField.javaField().getName() - + "], " - + typeToCanonicalTypeName(existingPainlessField.typeParameter()) - + "] " - + "with the same name and different type parameters" + throw lookupException( + "cannot add fields with the same name but are not equivalent for fields [[%s], [%s], [%s]] and [[%s], [%s], [%s]]" + + " with the same name and different type parameters", + targetCanonicalClassName, + fieldName, + typeToCanonicalTypeName(typeParameter), + targetCanonicalClassName, + existingPainlessField.javaField().getName(), + typeToCanonicalTypeName(existingPainlessField.typeParameter()) ); } } else { @@ -1140,23 +984,15 @@ public void addPainlessField( newPainlessField = painlessFieldCache.computeIfAbsent(newPainlessField, key -> key); painlessClassBuilder.fields.put(painlessFieldKey.intern(), newPainlessField); } else if (newPainlessField.equals(existingPainlessField) == false) { - throw new IllegalArgumentException( - "cannot add fields with the same name but are not equivalent for fields " - + "[[" - + targetCanonicalClassName - + "], [" - + fieldName - + "], [" - + typeToCanonicalTypeName(typeParameter) - + "] and " - + "[[" - + targetCanonicalClassName - + 
"], [" - + existingPainlessField.javaField().getName() - + "], " - + typeToCanonicalTypeName(existingPainlessField.typeParameter()) - + "] " - + "with the same name and different type parameters" + throw lookupException( + "cannot add fields with the same name but are not equivalent for fields [[%s], [%s], [%s]] and [[%s], [%s], [%s]]" + + " with the same name and different type parameters", + targetCanonicalClassName, + fieldName, + typeToCanonicalTypeName(typeParameter), + targetCanonicalClassName, + existingPainlessField.javaField().getName(), + typeToCanonicalTypeName(existingPainlessField.typeParameter()) ); } } @@ -1180,38 +1016,18 @@ public void addImportedPainlessMethod( Class targetClass = loadClass(classLoader, targetJavaClassName, () -> "class [" + targetJavaClassName + "] not found"); String targetCanonicalClassName = typeToCanonicalTypeName(targetClass); - if (targetClass == null) { - throw new IllegalArgumentException( - "target class [" - + targetCanonicalClassName - + "] not found for imported method " - + "[[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + canonicalTypeNameParameters - + "]" - ); - } - List> typeParameters = new ArrayList<>(canonicalTypeNameParameters.size()); for (String canonicalTypeNameParameter : canonicalTypeNameParameters) { Class typeParameter = canonicalTypeNameToType(canonicalTypeNameParameter); if (typeParameter == null) { - throw new IllegalArgumentException( - "type parameter [" - + canonicalTypeNameParameter - + "] not found for imported method " - + "[[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + canonicalTypeNameParameters - + "]" + throw lookupException( + "type parameter [%s] not found for imported method [[%s], [%s], %s]", + canonicalTypeNameParameter, + targetCanonicalClassName, + methodName, + canonicalTypeNameParameters ); } @@ -1221,17 +1037,12 @@ public void addImportedPainlessMethod( Class returnType = canonicalTypeNameToType(returnCanonicalTypeName); if (returnType == null) { - throw new IllegalArgumentException( - "return type [" - + returnCanonicalTypeName - + "] not found for imported method " - + "[[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + canonicalTypeNameParameters - + "]" + throw lookupException( + "return type [%s] not found for imported method [[%s], [%s], %s]", + returnCanonicalTypeName, + targetCanonicalClassName, + methodName, + canonicalTypeNameParameters ); } @@ -1260,11 +1071,9 @@ public void addImportedPainlessMethod( if (existingTargetClass == null) { javaClassNamesToClasses.put(targetClass.getName().intern(), targetClass); } else if (existingTargetClass != targetClass) { - throw new IllegalArgumentException( - "class [" - + targetCanonicalClassName - + "] " - + "cannot represent multiple java classes with the same name from different class loaders" + throw lookupException( + "class [%s] cannot represent multiple java classes with the same name from different class loaders", + targetCanonicalClassName ); } @@ -1279,17 +1088,12 @@ public void addImportedPainlessMethod( for (Class typeParameter : typeParameters) { if (isValidType(typeParameter) == false) { - throw new IllegalArgumentException( - "type parameter [" - + typeToCanonicalTypeName(typeParameter) - + "] " - + "not found for imported method [[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]" + throw lookupException( + "type parameter [%s] not found for imported method [[%s], [%s], %s]", + 
typeToCanonicalTypeName(typeParameter), + targetCanonicalClassName, + methodName, + typesToCanonicalTypeNames(typeParameters) ); } @@ -1297,17 +1101,12 @@ public void addImportedPainlessMethod( } if (isValidType(returnType) == false) { - throw new IllegalArgumentException( - "return type [" - + typeToCanonicalTypeName(returnType) - + "] not found for imported method " - + "[[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]" + throw lookupException( + "return type [%s] not found for imported method [[%s], [%s], %s]", + typeToCanonicalTypeName(returnType), + targetCanonicalClassName, + methodName, + typesToCanonicalTypeNames(typeParameters) ); } @@ -1316,46 +1115,32 @@ public void addImportedPainlessMethod( try { javaMethod = targetClass.getMethod(methodName, javaTypeParameters.toArray(new Class[typeParametersSize])); } catch (NoSuchMethodException nsme) { - throw new IllegalArgumentException( - "imported method reflection object [[" - + targetCanonicalClassName - + "], " - + "[" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "] not found", - nsme + throw lookupException( + nsme, + "imported method reflection object [[%s], [%s], %s] not found", + targetCanonicalClassName, + methodName, + typesToCanonicalTypeNames(typeParameters) ); } if (javaMethod.getReturnType() != typeToJavaType(returnType)) { - throw new IllegalArgumentException( - "return type [" - + typeToCanonicalTypeName(javaMethod.getReturnType()) - + "] " - + "does not match the specified returned type [" - + typeToCanonicalTypeName(returnType) - + "] " - + "for imported method [[" - + targetClass.getCanonicalName() - + "], [" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]" + throw lookupException( + "return type [%s] does not match the specified returned type [%s] for imported method [[%s], [%s], %s]", + typeToCanonicalTypeName(javaMethod.getReturnType()), + typeToCanonicalTypeName(returnType), + targetClass.getCanonicalName(), + methodName, + typesToCanonicalTypeNames(typeParameters) ); } if (Modifier.isStatic(javaMethod.getModifiers()) == false) { - throw new IllegalArgumentException( - "imported method [[" - + targetClass.getCanonicalName() - + "], [" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "] must be static" + throw lookupException( + "imported method [[%s], [%s], %s] must be static", + targetClass.getCanonicalName(), + methodName, + typesToCanonicalTypeNames(typeParameters) ); } @@ -1374,16 +1159,12 @@ public void addImportedPainlessMethod( try { methodHandle = lookup(targetClass).unreflect(javaMethod); } catch (IllegalAccessException iae) { - throw new IllegalArgumentException( - "imported method handle [[" - + targetClass.getCanonicalName() - + "], " - + "[" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "] not found", - iae + throw lookupException( + iae, + "imported method handle [[%s], [%s], %s] not found", + targetClass.getCanonicalName(), + methodName, + typesToCanonicalTypeNames(typeParameters) ); } @@ -1404,29 +1185,17 @@ public void addImportedPainlessMethod( newImportedPainlessMethod = painlessMethodCache.computeIfAbsent(newImportedPainlessMethod, key -> key); painlessMethodKeysToImportedPainlessMethods.put(painlessMethodKey.intern(), newImportedPainlessMethod); } else if (newImportedPainlessMethod.equals(existingImportedPainlessMethod) == false) { - throw new IllegalArgumentException( - "cannot add imported methods with the 
same name and arity " - + "but do not have equivalent methods " - + "[[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + "[" - + typeToCanonicalTypeName(returnType) - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "] and " - + "[[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + "[" - + typeToCanonicalTypeName(existingImportedPainlessMethod.returnType()) - + "], " - + typesToCanonicalTypeNames(existingImportedPainlessMethod.typeParameters()) - + "]" + throw lookupException( + "cannot add imported methods with the same name and arity but do not have equivalent methods " + + "[[%s], [%s], [%s], %s] and [[%s], [%s], [%s], %s]", + targetCanonicalClassName, + methodName, + typeToCanonicalTypeName(returnType), + typesToCanonicalTypeNames(typeParameters), + targetCanonicalClassName, + methodName, + typeToCanonicalTypeName(existingImportedPainlessMethod.returnType()), + typesToCanonicalTypeNames(existingImportedPainlessMethod.typeParameters()) ); } } @@ -1454,17 +1223,12 @@ public void addPainlessClassBinding( Class typeParameter = canonicalTypeNameToType(canonicalTypeNameParameter); if (typeParameter == null) { - throw new IllegalArgumentException( - "type parameter [" - + canonicalTypeNameParameter - + "] not found for class binding " - + "[[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + canonicalTypeNameParameters - + "]" + throw lookupException( + "type parameter [%s] not found for class binding [[%s], [%s], %s]", + canonicalTypeNameParameter, + targetCanonicalClassName, + methodName, + canonicalTypeNameParameters ); } @@ -1474,17 +1238,12 @@ public void addPainlessClassBinding( Class returnType = canonicalTypeNameToType(returnCanonicalTypeName); if (returnType == null) { - throw new IllegalArgumentException( - "return type [" - + returnCanonicalTypeName - + "] not found for class binding " - + "[[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + canonicalTypeNameParameters - + "]" + throw lookupException( + "return type [%s] not found for class binding [[%s], [%s], %s]", + returnCanonicalTypeName, + targetCanonicalClassName, + methodName, + canonicalTypeNameParameters ); } @@ -1513,11 +1272,9 @@ public void addPainlessClassBinding( if (existingTargetClass == null) { javaClassNamesToClasses.put(targetClass.getName().intern(), targetClass); } else if (existingTargetClass != targetClass) { - throw new IllegalArgumentException( - "class [" - + targetCanonicalClassName - + "] " - + "cannot represent multiple java classes with the same name from different class loaders" + throw lookupException( + "class [%s] cannot represent multiple java classes with the same name from different class loaders", + targetCanonicalClassName ); } @@ -1546,46 +1303,32 @@ public void addPainlessClassBinding( Class typeParameter = typeParameters.get(typeParameterIndex); if (isValidType(typeParameter) == false) { - throw new IllegalArgumentException( - "type parameter [" - + typeToCanonicalTypeName(typeParameter) - + "] not found " - + "for class binding [[" - + targetCanonicalClassName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]" + throw lookupException( + "type parameter [%s] not found for class binding [[%s], %s]", + typeToCanonicalTypeName(typeParameter), + targetCanonicalClassName, + typesToCanonicalTypeNames(typeParameters) ); } Class javaTypeParameter = constructorParameterTypes[typeParameterIndex]; if (isValidType(javaTypeParameter) == false) { - throw new IllegalArgumentException( - "type parameter 
[" - + typeToCanonicalTypeName(typeParameter) - + "] not found " - + "for class binding [[" - + targetCanonicalClassName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]" + throw lookupException( + "type parameter [%s] not found for class binding [[%s], %s]", + typeToCanonicalTypeName(typeParameter), + targetCanonicalClassName, + typesToCanonicalTypeNames(typeParameters) ); } if (javaTypeParameter != typeToJavaType(typeParameter)) { - throw new IllegalArgumentException( - "type parameter [" - + typeToCanonicalTypeName(javaTypeParameter) - + "] " - + "does not match the specified type parameter [" - + typeToCanonicalTypeName(typeParameter) - + "] " - + "for class binding [[" - + targetClass.getCanonicalName() - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]" + throw lookupException( + "type parameter [%s] does not match the specified type parameter [%s] for class binding [[%s], %s]", + typeToCanonicalTypeName(javaTypeParameter), + typeToCanonicalTypeName(typeParameter), + targetClass.getCanonicalName(), + typesToCanonicalTypeNames(typeParameters) ); } } @@ -1623,80 +1366,54 @@ public void addPainlessClassBinding( Class typeParameter = typeParameters.get(constructorParameterTypes.length + typeParameterIndex); if (isValidType(typeParameter) == false) { - throw new IllegalArgumentException( - "type parameter [" - + typeToCanonicalTypeName(typeParameter) - + "] not found " - + "for class binding [[" - + targetCanonicalClassName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]" + throw lookupException( + "type parameter [%s] not found for class binding [[%s], %s]", + typeToCanonicalTypeName(typeParameter), + targetCanonicalClassName, + typesToCanonicalTypeNames(typeParameters) ); } Class javaTypeParameter = javaMethod.getParameterTypes()[typeParameterIndex]; if (isValidType(javaTypeParameter) == false) { - throw new IllegalArgumentException( - "type parameter [" - + typeToCanonicalTypeName(typeParameter) - + "] not found " - + "for class binding [[" - + targetCanonicalClassName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]" + throw lookupException( + "type parameter [%s] not found for class binding [[%s], %s]", + typeToCanonicalTypeName(typeParameter), + targetCanonicalClassName, + typesToCanonicalTypeNames(typeParameters) ); } if (javaTypeParameter != typeToJavaType(typeParameter)) { - throw new IllegalArgumentException( - "type parameter [" - + typeToCanonicalTypeName(javaTypeParameter) - + "] " - + "does not match the specified type parameter [" - + typeToCanonicalTypeName(typeParameter) - + "] " - + "for class binding [[" - + targetClass.getCanonicalName() - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]" + throw lookupException( + "type parameter [%s] does not match the specified type parameter [%s] for class binding [[%s], %s]", + typeToCanonicalTypeName(javaTypeParameter), + typeToCanonicalTypeName(typeParameter), + targetClass.getCanonicalName(), + typesToCanonicalTypeNames(typeParameters) ); } } if (isValidType(returnType) == false) { - throw new IllegalArgumentException( - "return type [" - + typeToCanonicalTypeName(returnType) - + "] not found for class binding " - + "[[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]" + throw lookupException( + "return type [%s] not found for class binding [[%s], [%s], %s]", + typeToCanonicalTypeName(returnType), + targetCanonicalClassName, + methodName, + typesToCanonicalTypeNames(typeParameters) ); } if 
(javaMethod.getReturnType() != typeToJavaType(returnType)) { - throw new IllegalArgumentException( - "return type [" - + typeToCanonicalTypeName(javaMethod.getReturnType()) - + "] " - + "does not match the specified returned type [" - + typeToCanonicalTypeName(returnType) - + "] " - + "for class binding [[" - + targetClass.getCanonicalName() - + "], [" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]" + throw lookupException( + "return type [%s] does not match the specified returned type [%s] for class binding [[%s], [%s], %s]", + typeToCanonicalTypeName(javaMethod.getReturnType()), + typeToCanonicalTypeName(returnType), + targetClass.getCanonicalName(), + methodName, + typesToCanonicalTypeNames(typeParameters) ); } @@ -1711,14 +1428,11 @@ public void addPainlessClassBinding( } if (Modifier.isStatic(javaMethod.getModifiers())) { - throw new IllegalArgumentException( - "class binding [[" - + targetClass.getCanonicalName() - + "], [" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "] cannot be static" + throw lookupException( + "class binding [[%s], [%s], %s] cannot be static", + targetClass.getCanonicalName(), + methodName, + typesToCanonicalTypeNames(typeParameters) ); } @@ -1735,31 +1449,17 @@ public void addPainlessClassBinding( newPainlessClassBinding = painlessClassBindingCache.computeIfAbsent(newPainlessClassBinding, Function.identity()); painlessMethodKeysToPainlessClassBindings.put(painlessMethodKey.intern(), newPainlessClassBinding); } else if (newPainlessClassBinding.equals(existingPainlessClassBinding) == false) { - throw new IllegalArgumentException( - "cannot add class bindings with the same name and arity " - + "but do not have equivalent methods " - + "[[" - + targetCanonicalClassName - + "], " - + "[" - + methodName - + "], " - + "[" - + typeToCanonicalTypeName(returnType) - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "] and " - + "[[" - + targetCanonicalClassName - + "], " - + "[" - + methodName - + "], " - + "[" - + typeToCanonicalTypeName(existingPainlessClassBinding.returnType()) - + "], " - + typesToCanonicalTypeNames(existingPainlessClassBinding.typeParameters()) - + "]" + throw lookupException( + "cannot add class bindings with the same name and arity but do not have equivalent methods " + + "[[%s], [%s], [%s], %s] and [[%s], [%s], [%s], %s]", + targetCanonicalClassName, + methodName, + typeToCanonicalTypeName(returnType), + typesToCanonicalTypeNames(typeParameters), + targetCanonicalClassName, + methodName, + typeToCanonicalTypeName(existingPainlessClassBinding.returnType()), + typesToCanonicalTypeNames(existingPainlessClassBinding.typeParameters()) ); } } @@ -1785,17 +1485,12 @@ public void addPainlessInstanceBinding( Class typeParameter = canonicalTypeNameToType(canonicalTypeNameParameter); if (typeParameter == null) { - throw new IllegalArgumentException( - "type parameter [" - + canonicalTypeNameParameter - + "] not found for instance binding " - + "[[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + canonicalTypeNameParameters - + "]" + throw lookupException( + "type parameter [%s] not found for instance binding [[%s], [%s], %s]", + canonicalTypeNameParameter, + targetCanonicalClassName, + methodName, + canonicalTypeNameParameters ); } @@ -1805,17 +1500,12 @@ public void addPainlessInstanceBinding( Class returnType = canonicalTypeNameToType(returnCanonicalTypeName); if (returnType == null) { - throw new IllegalArgumentException( - "return type [" - + 
returnCanonicalTypeName - + "] not found for class binding " - + "[[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + canonicalTypeNameParameters - + "]" + throw lookupException( + "return type [%s] not found for class binding [[%s], [%s], %s]", + returnCanonicalTypeName, + targetCanonicalClassName, + methodName, + canonicalTypeNameParameters ); } @@ -1846,11 +1536,9 @@ public void addPainlessInstanceBinding( if (existingTargetClass == null) { javaClassNamesToClasses.put(targetClass.getName().intern(), targetClass); } else if (existingTargetClass != targetClass) { - throw new IllegalArgumentException( - "class [" - + targetCanonicalClassName - + "] " - + "cannot represent multiple java classes with the same name from different class loaders" + throw lookupException( + "class [%s] cannot represent multiple java classes with the same name from different class loaders", + targetCanonicalClassName ); } @@ -1865,17 +1553,12 @@ public void addPainlessInstanceBinding( for (Class typeParameter : typeParameters) { if (isValidType(typeParameter) == false) { - throw new IllegalArgumentException( - "type parameter [" - + typeToCanonicalTypeName(typeParameter) - + "] " - + "not found for instance binding [[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]" + throw lookupException( + "type parameter [%s] not found for instance binding [[%s], [%s], %s]", + typeToCanonicalTypeName(typeParameter), + targetCanonicalClassName, + methodName, + typesToCanonicalTypeNames(typeParameters) ); } @@ -1883,17 +1566,12 @@ public void addPainlessInstanceBinding( } if (isValidType(returnType) == false) { - throw new IllegalArgumentException( - "return type [" - + typeToCanonicalTypeName(returnType) - + "] not found for imported method " - + "[[" - + targetCanonicalClassName - + "], [" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]" + throw lookupException( + "return type [%s] not found for imported method [[%s], [%s], %s]", + typeToCanonicalTypeName(returnType), + targetCanonicalClassName, + methodName, + typesToCanonicalTypeNames(typeParameters) ); } @@ -1902,46 +1580,32 @@ public void addPainlessInstanceBinding( try { javaMethod = targetClass.getMethod(methodName, javaTypeParameters.toArray(new Class[typeParametersSize])); } catch (NoSuchMethodException nsme) { - throw new IllegalArgumentException( - "instance binding reflection object [[" - + targetCanonicalClassName - + "], " - + "[" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "] not found", - nsme + throw lookupException( + nsme, + "instance binding reflection object [[%s], [%s], %s] not found", + targetCanonicalClassName, + methodName, + typesToCanonicalTypeNames(typeParameters) ); } if (javaMethod.getReturnType() != typeToJavaType(returnType)) { - throw new IllegalArgumentException( - "return type [" - + typeToCanonicalTypeName(javaMethod.getReturnType()) - + "] " - + "does not match the specified returned type [" - + typeToCanonicalTypeName(returnType) - + "] " - + "for instance binding [[" - + targetClass.getCanonicalName() - + "], [" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "]" + throw lookupException( + "return type [%s] does not match the specified returned type [%s] for instance binding [[%s], [%s], %s]", + typeToCanonicalTypeName(javaMethod.getReturnType()), + typeToCanonicalTypeName(returnType), + targetClass.getCanonicalName(), + methodName, + 
typesToCanonicalTypeNames(typeParameters) ); } if (Modifier.isStatic(javaMethod.getModifiers())) { - throw new IllegalArgumentException( - "instance binding [[" - + targetClass.getCanonicalName() - + "], [" - + methodName - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "] cannot be static" + throw lookupException( + "instance binding [[%s], [%s], %s] cannot be static", + targetClass.getCanonicalName(), + methodName, + typesToCanonicalTypeNames(typeParameters) ); } @@ -1968,34 +1632,19 @@ public void addPainlessInstanceBinding( newPainlessInstanceBinding = painlessInstanceBindingCache.computeIfAbsent(newPainlessInstanceBinding, key -> key); painlessMethodKeysToPainlessInstanceBindings.put(painlessMethodKey.intern(), newPainlessInstanceBinding); } else if (newPainlessInstanceBinding.equals(existingPainlessInstanceBinding) == false) { - throw new IllegalArgumentException( - "cannot add instances bindings with the same name and arity " - + "but do not have equivalent methods " - + "[[" - + targetCanonicalClassName - + "], " - + "[" - + methodName - + "], " - + "[" - + typeToCanonicalTypeName(returnType) - + "], " - + typesToCanonicalTypeNames(typeParameters) - + "], " - + painlessAnnotations - + " and " - + "[[" - + targetCanonicalClassName - + "], " - + "[" - + methodName - + "], " - + "[" - + typeToCanonicalTypeName(existingPainlessInstanceBinding.returnType()) - + "], " - + typesToCanonicalTypeNames(existingPainlessInstanceBinding.typeParameters()) - + "], " - + existingPainlessInstanceBinding.annotations() + throw lookupException( + "cannot add instances bindings with the same name and arity but do not have equivalent methods " + + "[[%s], [%s], [%s], %s], %s and [[%s], [%s], [%s], %s], %s", + targetCanonicalClassName, + methodName, + typeToCanonicalTypeName(returnType), + typesToCanonicalTypeNames(typeParameters), + painlessAnnotations, + targetCanonicalClassName, + methodName, + typeToCanonicalTypeName(existingPainlessInstanceBinding.returnType()), + typesToCanonicalTypeNames(existingPainlessInstanceBinding.typeParameters()), + existingPainlessInstanceBinding.annotations() ); } } @@ -2014,20 +1663,20 @@ public PainlessLookup build() { if (javaClassNamesToClasses.values().containsAll(canonicalClassNamesToClasses.values()) == false) { throw new IllegalArgumentException( - "the values of java class names to classes " + "must be a superset of the values of canonical class names to classes" + "the values of java class names to classes must be a superset of the values of canonical class names to classes" ); } if (javaClassNamesToClasses.values().containsAll(classesToPainlessClasses.keySet()) == false) { throw new IllegalArgumentException( - "the values of java class names to classes " + "must be a superset of the keys of classes to painless classes" + "the values of java class names to classes must be a superset of the keys of classes to painless classes" ); } if (canonicalClassNamesToClasses.values().containsAll(classesToPainlessClasses.keySet()) == false || classesToPainlessClasses.keySet().containsAll(canonicalClassNamesToClasses.values()) == false) { throw new IllegalArgumentException( - "the values of canonical class names to classes " + "must have the same classes as the keys of classes to painless classes" + "the values of canonical class names to classes must have the same classes as the keys of classes to painless classes" ); } @@ -2048,7 +1697,7 @@ private void buildPainlessClassHierarchy() { } for (Class subClass : classesToPainlessClassBuilders.keySet()) { - List> 
superInterfaces = new ArrayList<>(Arrays.asList(subClass.getInterfaces())); + Deque> superInterfaces = new ArrayDeque<>(Arrays.asList(subClass.getInterfaces())); // we check for Object.class as part of the allow listed classes because // it is possible for the compiler to work without Object @@ -2087,7 +1736,7 @@ private void buildPainlessClassHierarchy() { Set> resolvedInterfaces = new HashSet<>(); while (superInterfaces.isEmpty() == false) { - Class superInterface = superInterfaces.remove(0); + Class superInterface = superInterfaces.removeFirst(); if (resolvedInterfaces.add(superInterface)) { if (classesToPainlessClassBuilders.containsKey(superInterface)) { @@ -2119,12 +1768,10 @@ private void setFunctionalInterfaceMethod(Class targetClass, PainlessClassBui } if (javaMethods.size() != 1 && targetClass.isAnnotationPresent(FunctionalInterface.class)) { - throw new IllegalArgumentException( - "class [" - + typeToCanonicalTypeName(targetClass) - + "] " - + "is illegally marked as a FunctionalInterface with java methods " - + javaMethods + throw lookupException( + "class [%s] is illegally marked as a FunctionalInterface with java methods %s", + typeToCanonicalTypeName(targetClass), + javaMethods ); } else if (javaMethods.size() == 1) { java.lang.reflect.Method javaMethod = javaMethods.get(0); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupUtility.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupUtility.java index be8bb9902a9a..766714a45469 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupUtility.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupUtility.java @@ -340,11 +340,12 @@ public static String buildPainlessFieldKey(String fieldName) { * derived from an {@link org.elasticsearch.painless.spi.annotation.InjectConstantAnnotation}. 
*/ public static Object[] buildInjections(PainlessMethod painlessMethod, Map constants) { - if (painlessMethod.annotations().containsKey(InjectConstantAnnotation.class) == false) { + InjectConstantAnnotation injects = (InjectConstantAnnotation) painlessMethod.annotations().get(InjectConstantAnnotation.class); + if (injects == null) { return new Object[0]; } - List names = ((InjectConstantAnnotation) painlessMethod.annotations().get(InjectConstantAnnotation.class)).injects(); + List names = injects.injects(); Object[] injections = new Object[names.size()]; for (int i = 0; i < names.size(); i++) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultConstantFoldingOptimizationPhase.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultConstantFoldingOptimizationPhase.java index df57f9d3da65..2097a3e2995f 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultConstantFoldingOptimizationPhase.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultConstantFoldingOptimizationPhase.java @@ -8,6 +8,7 @@ package org.elasticsearch.painless.phase; +import org.elasticsearch.core.Strings; import org.elasticsearch.painless.AnalyzerCaster; import org.elasticsearch.painless.Operation; import org.elasticsearch.painless.ir.BinaryMathNode; @@ -45,6 +46,42 @@ */ public class DefaultConstantFoldingOptimizationPhase extends IRExpressionModifyingVisitor { + private static IllegalStateException unaryError(String type, String operation, String constant) { + return new IllegalStateException( + Strings.format( + "constant folding error: unexpected type [%s] for unary operation [%s] on constant [%s]", + type, + operation, + constant + ) + ); + } + + private static IllegalStateException binaryError(String type, String operation, String constant1, String constant2) { + return error(type, "binary", operation, constant1, constant2); + } + + private static IllegalStateException booleanError(String type, String operation, String constant1, String constant2) { + return error(type, "boolean", operation, constant1, constant2); + } + + private static IllegalStateException comparisonError(String type, String operation, String constant1, String constant2) { + return error(type, "comparison", operation, constant1, constant2); + } + + private static IllegalStateException error(String type, String opType, String operation, String constant1, String constant2) { + return new IllegalStateException( + Strings.format( + "constant folding error: unexpected type [%s] for %s operation [%s] on constants [%s] and [%s]", + type, + opType, + operation, + constant1, + constant2 + ) + ); + } + @Override public void visitUnaryMath(UnaryMathNode irUnaryMathNode, Consumer scope) { irUnaryMathNode.getChildNode().visit(this, irUnaryMathNode::setChildNode); @@ -67,17 +104,10 @@ public void visitUnaryMath(UnaryMathNode irUnaryMathNode, Consumer sco } else { throw irBooleanNode.getLocation() .createError( - new IllegalStateException( - "constant folding error: " - + "unexpected type [" - + PainlessLookupUtility.typeToCanonicalTypeName(type) - + "] for " - + "binary operation [" - + operation.symbol - + "] on " - + "constants [" - + irLeftConstantNode.getDecorationString(IRDConstant.class) - + "] " - + "and [" - + irRightConstantNode.getDecorationString(IRDConstant.class) - + "]" + binaryError( + PainlessLookupUtility.typeToCanonicalTypeName(type), + operation.symbol, + 
irLeftConstantNode.getDecorationString(IRDConstant.class), + irRightConstantNode.getDecorationString(IRDConstant.class) ) ); } @@ -580,20 +488,11 @@ public void visitBoolean(BooleanNode irBooleanNode, Consumer sco } else { throw irBooleanNode.getLocation() .createError( - new IllegalStateException( - "constant folding error: " - + "unexpected type [" - + PainlessLookupUtility.typeToCanonicalTypeName(type) - + "] for " - + "boolean operation [" - + operation.symbol - + "] on " - + "constants [" - + irLeftConstantNode.getDecorationString(IRDConstant.class) - + "] " - + "and [" - + irRightConstantNode.getDecorationString(IRDConstant.class) - + "]" + booleanError( + PainlessLookupUtility.typeToCanonicalTypeName(type), + operation.symbol, + irLeftConstantNode.getDecorationString(IRDConstant.class), + irRightConstantNode.getDecorationString(IRDConstant.class) ) ); } @@ -687,20 +586,11 @@ public void visitComparison(ComparisonNode irComparisonNode, Consumer { @@ -551,6 +561,8 @@ public void visitForEachSubIterableLoop(ForEachSubIterableNode irForEachSubItera MethodWriter methodWriter = writeScope.getMethodWriter(); methodWriter.writeStatementOffset(irForEachSubIterableNode.getLocation()); + PainlessMethod painlessMethod = irForEachSubIterableNode.getDecorationValue(IRDMethod.class); + Variable variable = writeScope.defineVariable( irForEachSubIterableNode.getDecorationValue(IRDVariableType.class), irForEachSubIterableNode.getDecorationValue(IRDVariableName.class) @@ -562,10 +574,8 @@ public void visitForEachSubIterableLoop(ForEachSubIterableNode irForEachSubItera visit(irForEachSubIterableNode.getConditionNode(), writeScope); - PainlessMethod painlessMethod = irForEachSubIterableNode.getDecorationValue(IRDMethod.class); - if (painlessMethod == null) { - Type methodType = Type.getMethodType(Type.getType(Iterator.class), Type.getType(Object.class)); + Type methodType = Type.getMethodType(Type.getType(ValueIterator.class), Type.getType(Object.class)); methodWriter.invokeDefCall("iterator", methodType, DefBootstrap.ITERATOR); } else { methodWriter.invokeMethodCall(painlessMethod); @@ -583,8 +593,22 @@ public void visitForEachSubIterableLoop(ForEachSubIterableNode irForEachSubItera methodWriter.ifZCmp(MethodWriter.EQ, end); methodWriter.visitVarInsn(iterator.getAsmType().getOpcode(Opcodes.ILOAD), iterator.getSlot()); - methodWriter.invokeInterface(ITERATOR_TYPE, ITERATOR_NEXT); - methodWriter.writeCast(irForEachSubIterableNode.getDecorationValue(IRDCast.class)); + if (painlessMethod != null || variable.getType().isPrimitive() == false) { + methodWriter.invokeInterface(ITERATOR_TYPE, ITERATOR_NEXT); + methodWriter.writeCast(irForEachSubIterableNode.getDecorationValue(IRDCast.class)); + } else { + switch (variable.getAsmType().getSort()) { + case Type.BOOLEAN -> methodWriter.invokeInterface(VALUE_ITERATOR_TYPE, VALUE_ITERATOR_NEXT_BOOLEAN); + case Type.BYTE -> methodWriter.invokeInterface(VALUE_ITERATOR_TYPE, VALUE_ITERATOR_NEXT_BYTE); + case Type.SHORT -> methodWriter.invokeInterface(VALUE_ITERATOR_TYPE, VALUE_ITERATOR_NEXT_SHORT); + case Type.CHAR -> methodWriter.invokeInterface(VALUE_ITERATOR_TYPE, VALUE_ITERATOR_NEXT_CHAR); + case Type.INT -> methodWriter.invokeInterface(VALUE_ITERATOR_TYPE, VALUE_ITERATOR_NEXT_INT); + case Type.LONG -> methodWriter.invokeInterface(VALUE_ITERATOR_TYPE, VALUE_ITERATOR_NEXT_LONG); + case Type.FLOAT -> methodWriter.invokeInterface(VALUE_ITERATOR_TYPE, VALUE_ITERATOR_NEXT_FLOAT); + case Type.DOUBLE -> methodWriter.invokeInterface(VALUE_ITERATOR_TYPE, 
VALUE_ITERATOR_NEXT_DOUBLE); + default -> throw new IllegalArgumentException("Unknown primitive iteration variable type " + variable.getAsmType()); + } + } methodWriter.visitVarInsn(variable.getAsmType().getOpcode(Opcodes.ISTORE), variable.getSlot()); visit(irForEachSubIterableNode.getBlockNode(), writeScope.newLoopScope(begin, end)); @@ -788,12 +812,11 @@ public void visitUnaryMath(UnaryMathNode irUnaryMathNode, WriteScope writeScope) methodWriter.push(-1L); } else { throw new IllegalStateException( - "unexpected unary math operation [" - + operation - + "] " - + "for type [" - + irUnaryMathNode.getDecorationString(IRDExpressionType.class) - + "]" + Strings.format( + "unexpected unary math operation [%s] for type [%s]", + operation, + irUnaryMathNode.getDecorationString(IRDExpressionType.class) + ) ); } @@ -813,12 +836,11 @@ public void visitUnaryMath(UnaryMathNode irUnaryMathNode, WriteScope writeScope) } } else { throw new IllegalStateException( - "unexpected unary math operation [" - + operation - + "] " - + "for type [" - + irUnaryMathNode.getDecorationString(IRDExpressionType.class) - + "]" + Strings.format( + "unexpected unary math operation [%s] for type [%s]", + operation, + irUnaryMathNode.getDecorationString(IRDExpressionType.class) + ) ); } } @@ -845,12 +867,11 @@ public void visitBinaryMath(BinaryMathNode irBinaryMathNode, WriteScope writeSco methodWriter.invokeVirtual(Type.getType(Matcher.class), WriterConstants.MATCHER_MATCHES); } else { throw new IllegalStateException( - "unexpected binary math operation [" - + operation - + "] " - + "for type [" - + irBinaryMathNode.getDecorationString(IRDExpressionType.class) - + "]" + Strings.format( + "unexpected binary math operation [%s] for type [%s]", + operation, + irBinaryMathNode.getDecorationString(IRDExpressionType.class) + ) ); } } else { @@ -973,24 +994,22 @@ public void visitComparison(ComparisonNode irComparisonNode, WriteScope writeSco if (comparisonType == void.class || comparisonType == byte.class || comparisonType == short.class || comparisonType == char.class) { throw new IllegalStateException( - "unexpected comparison operation [" - + operation - + "] " - + "for type [" - + irComparisonNode.getDecorationString(IRDExpressionType.class) - + "]" + Strings.format( + "unexpected comparison operation [%s] for type [%s]", + operation, + irComparisonNode.getDecorationString(IRDExpressionType.class) + ) ); } else if (comparisonType == boolean.class) { if (eq) methodWriter.ifCmp(type, MethodWriter.EQ, jump); else if (ne) methodWriter.ifCmp(type, MethodWriter.NE, jump); else { throw new IllegalStateException( - "unexpected comparison operation [" - + operation - + "] " - + "for type [" - + irComparisonNode.getDecorationString(IRDExpressionType.class) - + "]" + Strings.format( + "unexpected comparison operation [%s] for type [%s]", + operation, + irComparisonNode.getDecorationString(IRDExpressionType.class) + ) ); } } else if (comparisonType == int.class @@ -1005,12 +1024,11 @@ public void visitComparison(ComparisonNode irComparisonNode, WriteScope writeSco else if (gte) methodWriter.ifCmp(type, MethodWriter.GE, jump); else { throw new IllegalStateException( - "unexpected comparison operation [" - + operation - + "] " - + "for type [" - + irComparisonNode.getDecorationString(IRDExpressionType.class) - + "]" + Strings.format( + "unexpected comparison operation [%s] for type [%s]", + operation, + irComparisonNode.getDecorationString(IRDExpressionType.class) + ) ); } @@ -1054,12 +1072,11 @@ public void visitComparison(ComparisonNode 
irComparisonNode, WriteScope writeSco writejump = false; } else { throw new IllegalStateException( - "unexpected comparison operation [" - + operation - + "] " - + "for type [" - + irComparisonNode.getDecorationString(IRDExpressionType.class) - + "]" + Strings.format( + "unexpected comparison operation [%s] for type [%s]", + operation, + irComparisonNode.getDecorationString(IRDExpressionType.class) + ) ); } } else { @@ -1083,12 +1100,11 @@ public void visitComparison(ComparisonNode irComparisonNode, WriteScope writeSco } } else { throw new IllegalStateException( - "unexpected comparison operation [" - + operation - + "] " - + "for type [" - + irComparisonNode.getDecorationString(IRDExpressionType.class) - + "]" + Strings.format( + "unexpected comparison operation [%s] for type [%s]", + operation, + irComparisonNode.getDecorationString(IRDExpressionType.class) + ) ); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultSemanticAnalysisPhase.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultSemanticAnalysisPhase.java index 8c6ac55e8ffe..9e95734f0978 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultSemanticAnalysisPhase.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultSemanticAnalysisPhase.java @@ -8,12 +8,14 @@ package org.elasticsearch.painless.phase; +import org.elasticsearch.core.Strings; import org.elasticsearch.painless.AnalyzerCaster; import org.elasticsearch.painless.CompilerSettings; import org.elasticsearch.painless.FunctionRef; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Operation; import org.elasticsearch.painless.lookup.PainlessCast; +import org.elasticsearch.painless.lookup.PainlessClass; import org.elasticsearch.painless.lookup.PainlessClassBinding; import org.elasticsearch.painless.lookup.PainlessConstructor; import org.elasticsearch.painless.lookup.PainlessField; @@ -157,6 +159,10 @@ */ public class DefaultSemanticAnalysisPhase extends UserTreeBaseVisitor { + private static ClassCastException castError(String formatText, Object... arguments) { + return new ClassCastException(Strings.format(formatText, arguments)); + } + /** * Decorates a user expression node with a PainlessCast. 
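Aside: DefaultSemanticAnalysisPhase gets the same treatment as the lookup and constant-folding code above: a small castError(String, Object...) factory (declared in the hunk just above) wraps Strings.format so each call site can throw a ClassCastException built from a one-line format string instead of a chain of concatenations. A rough stand-alone sketch of the pattern, with String.format standing in for org.elasticsearch.core.Strings.format (an assumption made only for this sketch):

    import java.util.Locale;

    class CastErrorSketch {
        // Stand-in for Strings.format used by the real code; assumption for this sketch only.
        private static String format(String fmt, Object... args) {
            return String.format(Locale.ROOT, fmt, args);
        }

        // Mirrors the helper added in this diff: build the exception message in one place.
        static ClassCastException castError(String formatText, Object... arguments) {
            return new ClassCastException(format(formatText, arguments));
        }

        public static void main(String[] args) {
            // Usage mirroring the rewritten call sites, e.g. visitReturn/visitCatch:
            ClassCastException e = castError("cannot cast from [%s] to [%s]", "List", "void");
            System.out.println(e.getMessage());
        }
    }
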
*/ @@ -251,13 +257,11 @@ public void visitFunction(SFunction userFunctionNode, ScriptScope scriptScope) { if (userBlockNode.getStatementNodes().isEmpty()) { throw userFunctionNode.createError( new IllegalArgumentException( - "invalid function definition: " - + "found no statements for function " - + "[" - + functionName - + "] with [" - + typeParameters.size() - + "] parameters" + Strings.format( + "invalid function definition: found no statements for function [%s] with [%d] parameters", + functionName, + typeParameters.size() + ) ) ); } @@ -270,13 +274,11 @@ public void visitFunction(SFunction userFunctionNode, ScriptScope scriptScope) { if (methodEscape == false && isAutoReturnEnabled == false && returnType != void.class) { throw userFunctionNode.createError( new IllegalArgumentException( - "invalid function definition: " - + "not all paths provide a return value for function " - + "[" - + functionName - + "] with [" - + typeParameters.size() - + "] parameters" + Strings.format( + "invalid function definition: not all paths provide a return value for function [%s] with [%d] parameters", + functionName, + typeParameters.size() + ) ) ); } @@ -750,14 +752,10 @@ public void visitReturn(SReturn userReturnNode, SemanticScope semanticScope) { if (userValueNode == null) { if (semanticScope.getReturnType() != void.class) { throw userReturnNode.createError( - new ClassCastException( - "cannot cast from " - + "[" - + semanticScope.getReturnCanonicalTypeName() - + "] to " - + "[" - + PainlessLookupUtility.typeToCanonicalTypeName(void.class) - + "]" + castError( + "cannot cast from [%s] to [%s]", + semanticScope.getReturnCanonicalTypeName(), + PainlessLookupUtility.typeToCanonicalTypeName(void.class) ) ); } @@ -892,14 +890,12 @@ public void visitCatch(SCatch userCatchNode, SemanticScope semanticScope) { if (userCatchNode.getBaseException().isAssignableFrom(type) == false) { throw userCatchNode.createError( - new ClassCastException( - "cannot cast from [" - + PainlessLookupUtility.typeToCanonicalTypeName(type) - + "] " - + "to [" - + PainlessLookupUtility.typeToCanonicalTypeName(baseException) - + "]" + castError( + "cannot cast from [%s] to [%s]", + PainlessLookupUtility.typeToCanonicalTypeName(type), + PainlessLookupUtility.typeToCanonicalTypeName(baseException) ) + ); } @@ -1031,15 +1027,11 @@ public void visitAssignment(EAssignment userAssignmentNode, SemanticScope semant if (compoundType == null || (isShift && shiftType == null)) { throw userAssignmentNode.createError( - new ClassCastException( - "invalid compound assignment: " - + "cannot apply [" - + operation.symbol - + "=] to types [" - + leftValueType - + "] and [" - + rightValueType - + "]" + castError( + "invalid compound assignment: cannot apply [%s=] to types [%s] and [%s]", + operation.symbol, + leftValueType, + rightValueType ) ); } @@ -1162,17 +1154,13 @@ public void visitUnary(EUnary userUnaryNode, SemanticScope semanticScope) { if (unaryType == null) { throw userUnaryNode.createError( - new ClassCastException( - "cannot apply the " - + operation.name - + " operator " - + "[" - + operation.symbol - + "] to the type " - + "[" - + PainlessLookupUtility.typeToCanonicalTypeName(childValueType) - + "]" + castError( + "cannot apply the %s operator [%s] to the type [%s]", + operation.name, + operation.symbol, + PainlessLookupUtility.typeToCanonicalTypeName(childValueType) ) + ); } @@ -1267,20 +1255,14 @@ public void visitBinary(EBinary userBinaryNode, SemanticScope semanticScope) { if (binaryType == null) { throw userBinaryNode.createError( 
- new ClassCastException( - "cannot apply the " - + operation.name - + " operator " - + "[" - + operation.symbol - + "] to the types " - + "[" - + PainlessLookupUtility.typeToCanonicalTypeName(leftValueType) - + "] and " - + "[" - + PainlessLookupUtility.typeToCanonicalTypeName(rightValueType) - + "]" + castError( + "cannot apply the %s operator [%s] to the types [%s] and [%s]", + operation.name, + operation.symbol, + PainlessLookupUtility.typeToCanonicalTypeName(leftValueType), + PainlessLookupUtility.typeToCanonicalTypeName(rightValueType) ) + ); } @@ -1404,19 +1386,12 @@ public void visitComp(EComp userCompNode, SemanticScope semanticScope) { if (promotedType == null) { throw userCompNode.createError( - new ClassCastException( - "cannot apply the " - + operation.name - + " operator " - + "[" - + operation.symbol - + "] to the types " - + "[" - + PainlessLookupUtility.typeToCanonicalTypeName(leftValueType) - + "] and " - + "[" - + PainlessLookupUtility.typeToCanonicalTypeName(rightValueType) - + "]" + castError( + "cannot apply the %s operator [%s] to the types [%s] and [%s]", + operation.name, + operation.symbol, + PainlessLookupUtility.typeToCanonicalTypeName(leftValueType), + PainlessLookupUtility.typeToCanonicalTypeName(rightValueType) ) ); } @@ -1565,13 +1540,11 @@ public void visitConditional(EConditional userConditionalNode, SemanticScope sem if (promote == null) { throw userConditionalNode.createError( new ClassCastException( - "cannot apply the conditional operator [?:] to the types " - + "[" - + PainlessLookupUtility.typeToCanonicalTypeName(leftValueType) - + "] and " - + "[" - + PainlessLookupUtility.typeToCanonicalTypeName(rightValueType) - + "]" + Strings.format( + "cannot apply the conditional operator [?:] to the types [%s] and [%s]", + PainlessLookupUtility.typeToCanonicalTypeName(leftValueType), + PainlessLookupUtility.typeToCanonicalTypeName(rightValueType) + ) ) ); } @@ -1821,12 +1794,11 @@ public void visitNewObj(ENewObj userNewObjNode, SemanticScope semanticScope) { if (semanticScope.getCondition(userNewObjNode, Write.class)) { throw userNewObjNode.createError( new IllegalArgumentException( - "invalid assignment cannot assign a value to new object with constructor " - + "[" - + canonicalTypeName - + "/" - + userArgumentsSize - + "]" + Strings.format( + "invalid assignment cannot assign a value to new object with constructor [%s/%d]", + canonicalTypeName, + userArgumentsSize + ) ) ); } @@ -1843,7 +1815,7 @@ public void visitNewObj(ENewObj userNewObjNode, SemanticScope semanticScope) { if (constructor == null) { throw userNewObjNode.createError( new IllegalArgumentException( - "constructor [" + typeToCanonicalTypeName(valueType) + ", /" + userArgumentsSize + "] not found" + Strings.format("constructor [%s, /%d] not found", typeToCanonicalTypeName(valueType), userArgumentsSize) ) ); } @@ -1854,14 +1826,12 @@ public void visitNewObj(ENewObj userNewObjNode, SemanticScope semanticScope) { if (constructor.typeParameters().size() != userArgumentsSize) { throw userNewObjNode.createError( new IllegalArgumentException( - "When calling constructor on type [" - + PainlessLookupUtility.typeToCanonicalTypeName(valueType) - + "] " - + "expected [" - + constructor.typeParameters().size() - + "] arguments, but found [" - + userArgumentsSize - + "]." 
+ Strings.format( + "When calling constructor on type [%s] expected [%d] arguments, but found [%d].", + PainlessLookupUtility.typeToCanonicalTypeName(valueType), + constructor.typeParameters().size(), + userArgumentsSize + ) ) ); } @@ -2294,13 +2264,12 @@ public void visitRegex(ERegex userRegexNode, SemanticScope semanticScope) { } catch (PatternSyntaxException pse) { throw new Location(location.getSourceName(), location.getOffset() + 1 + pse.getIndex()).createError( new IllegalArgumentException( - "invalid regular expression: " - + "could not compile regex constant [" - + pattern - + "] with flags [" - + flags - + "]: " - + pse.getDescription(), + Strings.format( + "invalid regular expression: could not compile regex constant [%s] with flags [%s]: %s", + pattern, + flags, + pse.getDescription() + ), pse ) ); @@ -2363,11 +2332,11 @@ public void visitLambda(ELambda userLambdaNode, SemanticScope semanticScope) { } // check arity before we manipulate parameters if (interfaceMethod.typeParameters().size() != canonicalTypeNameParameters.size()) throw new IllegalArgumentException( - "Incorrect number of parameters for [" - + interfaceMethod.javaMethod().getName() - + "] in [" - + targetType.getTargetCanonicalTypeName() - + "]" + Strings.format( + "Incorrect number of parameters for [%s] in [%s]", + interfaceMethod.javaMethod().getName(), + targetType.getTargetCanonicalTypeName() + ) ); // for method invocation, its allowed to ignore the return value if (interfaceMethod.returnType() == void.class) { @@ -2724,13 +2693,11 @@ public void visitDot(EDot userDotNode, SemanticScope semanticScope) { if (prefixValueType != null && prefixStaticType != null) { throw userDotNode.createError( new IllegalStateException( - "cannot have both " - + "value [" - + prefixValueType.getValueCanonicalTypeName() - + "] " - + "and type [" - + prefixStaticType.getStaticCanonicalTypeName() - + "]" + Strings.format( + "cannot have both value [%s] and type [%s]", + prefixValueType.getValueCanonicalTypeName(), + prefixStaticType.getStaticCanonicalTypeName() + ) ) ); } @@ -3248,13 +3215,11 @@ public void visitCall(ECall userCallNode, SemanticScope semanticScope) { if (prefixValueType != null && prefixStaticType != null) { throw userCallNode.createError( new IllegalStateException( - "cannot have both " - + "value [" - + prefixValueType.getValueCanonicalTypeName() - + "] " - + "and type [" - + prefixStaticType.getStaticCanonicalTypeName() - + "]" + Strings.format( + "cannot have both value [%s] and type [%s]", + prefixValueType.getValueCanonicalTypeName(), + prefixStaticType.getStaticCanonicalTypeName() + ) ) ); } @@ -3283,21 +3248,20 @@ public void visitCall(ECall userCallNode, SemanticScope semanticScope) { method = lookup.lookupPainlessMethod(type, false, methodName, userArgumentsSize); if (method == null) { - dynamic = lookup.lookupPainlessClass(type).annotations.containsKey(DynamicTypeAnnotation.class) + PainlessClass pc = lookup.lookupPainlessClass(type); + dynamic = pc != null + && pc.annotations.containsKey(DynamicTypeAnnotation.class) && lookup.lookupPainlessSubClassesMethod(type, methodName, userArgumentsSize) != null; if (dynamic == false) { throw userCallNode.createError( new IllegalArgumentException( - "member method " - + "[" - + prefixValueType.getValueCanonicalTypeName() - + ", " - + methodName - + "/" - + userArgumentsSize - + "] " - + "not found" + Strings.format( + "member method [%s, %s/%d] not found", + prefixValueType.getValueCanonicalTypeName(), + methodName, + userArgumentsSize + ) ) ); } @@ -3311,15 
+3275,12 @@ public void visitCall(ECall userCallNode, SemanticScope semanticScope) { if (method == null) { throw userCallNode.createError( new IllegalArgumentException( - "static method " - + "[" - + prefixStaticType.getStaticCanonicalTypeName() - + ", " - + methodName - + "/" - + userArgumentsSize - + "] " - + "not found" + Strings.format( + "static method [%s, %s/%d] not found", + prefixStaticType.getStaticCanonicalTypeName(), + methodName, + userArgumentsSize + ) ) ); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultSemanticHeaderPhase.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultSemanticHeaderPhase.java index 24d5508c5de7..61be12039cbe 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultSemanticHeaderPhase.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultSemanticHeaderPhase.java @@ -8,6 +8,7 @@ package org.elasticsearch.painless.phase; +import org.elasticsearch.core.Strings; import org.elasticsearch.painless.lookup.PainlessLookup; import org.elasticsearch.painless.node.SClass; import org.elasticsearch.painless.node.SFunction; @@ -36,15 +37,13 @@ public void visitFunction(SFunction userFunctionNode, ScriptScope scriptScope) { if (parameterCount != parameterNames.size()) { throw userFunctionNode.createError( new IllegalStateException( - "invalid function definition: " - + "parameter types size [" - + canonicalTypeNameParameters.size() - + "] is not equal to " - + "parameter names size [" - + parameterNames.size() - + "] for function [" - + functionName - + "]" + Strings.format( + "invalid function definition: " + + "parameter types size [%d] is not equal to parameter names size [%d] for function [%s]", + canonicalTypeNameParameters.size(), + parameterNames.size(), + functionName + ) ) ); } @@ -65,12 +64,11 @@ public void visitFunction(SFunction userFunctionNode, ScriptScope scriptScope) { if (returnType == null) { throw userFunctionNode.createError( new IllegalArgumentException( - "invalid function definition: " - + "return type [" - + returnCanonicalTypeName - + "] not found for function [" - + functionKey - + "]" + Strings.format( + "invalid function definition: return type [%s] not found for function [%s]", + returnCanonicalTypeName, + functionKey + ) ) ); } @@ -83,12 +81,11 @@ public void visitFunction(SFunction userFunctionNode, ScriptScope scriptScope) { if (paramType == null) { throw userFunctionNode.createError( new IllegalArgumentException( - "invalid function definition: " - + "parameter type [" - + typeParameter - + "] not found for function [" - + functionKey - + "]" + Strings.format( + "invalid function definition: parameter type [%s] not found for function [%s]", + typeParameter, + functionKey + ) ) ); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultUserTreeToIRTreePhase.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultUserTreeToIRTreePhase.java index ddbdd2ce5ecb..4739f7682ef3 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultUserTreeToIRTreePhase.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/DefaultUserTreeToIRTreePhase.java @@ -15,6 +15,7 @@ import org.elasticsearch.painless.MethodWriter; import org.elasticsearch.painless.Operation; import org.elasticsearch.painless.WriterConstants; +import org.elasticsearch.painless.api.ValueIterator; import 
org.elasticsearch.painless.ir.BinaryImplNode; import org.elasticsearch.painless.ir.BinaryMathNode; import org.elasticsearch.painless.ir.BlockNode; @@ -759,17 +760,24 @@ public void visitEach(SEach userEachNode, ScriptScope scriptScope) { irForEachSubIterableNode.setBlockNode(irBlockNode); irForEachSubIterableNode.attachDecoration(new IRDVariableType(variable.type())); irForEachSubIterableNode.attachDecoration(new IRDVariableName(variable.name())); - irForEachSubIterableNode.attachDecoration(new IRDIterableType(Iterator.class)); irForEachSubIterableNode.attachDecoration(new IRDIterableName("#itr" + userEachNode.getLocation().getOffset())); if (iterableValueType != def.class) { + irForEachSubIterableNode.attachDecoration(new IRDIterableType(Iterator.class)); irForEachSubIterableNode.attachDecoration( new IRDMethod(scriptScope.getDecoration(userEachNode, IterablePainlessMethod.class).iterablePainlessMethod()) ); - } - if (painlessCast != null) { - irForEachSubIterableNode.attachDecoration(new IRDCast(painlessCast)); + if (painlessCast != null) { + irForEachSubIterableNode.attachDecoration(new IRDCast(painlessCast)); + } + } else { + // use ValueIterator as we could be iterating over an array directly + irForEachSubIterableNode.attachDecoration(new IRDIterableType(ValueIterator.class)); + + if (painlessCast != null && variable.type().isPrimitive() == false) { + irForEachSubIterableNode.attachDecoration(new IRDCast(painlessCast)); + } } irConditionNode = irForEachSubIterableNode; @@ -905,7 +913,7 @@ public void visitAssignment(EAssignment userAssignmentNode, ScriptScope scriptSc irCompoundNode = stringConcatenationNode; // must handle the StringBuilder case for java version <= 8 - if (irLoadNode instanceof BinaryImplNode bin && WriterConstants.INDY_STRING_CONCAT_BOOTSTRAP_HANDLE == null) { + if (irLoadNode instanceof BinaryImplNode bin && WriterConstants.STRING_CONCAT_BOOTSTRAP_HANDLE == null) { bin.getLeftNode().attachDecoration(new IRDDepth(1)); } // handles when the operation is mathematical diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/PainlessSemanticAnalysisPhase.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/PainlessSemanticAnalysisPhase.java index 89eeca52be4b..a7b4771a1fbf 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/PainlessSemanticAnalysisPhase.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/PainlessSemanticAnalysisPhase.java @@ -8,6 +8,7 @@ package org.elasticsearch.painless.phase; +import org.elasticsearch.core.Strings; import org.elasticsearch.painless.AnalyzerCaster; import org.elasticsearch.painless.Location; import org.elasticsearch.painless.ScriptClassInfo; @@ -77,13 +78,11 @@ public void visitFunction(SFunction userFunctionNode, ScriptScope scriptScope) { if (userBlockNode.getStatementNodes().isEmpty()) { throw userFunctionNode.createError( new IllegalArgumentException( - "invalid function definition: " - + "found no statements for function " - + "[" - + functionName - + "] with [" - + typeParameters.size() - + "] parameters" + Strings.format( + "invalid function definition: found no statements for function [%s] with [%d] parameters", + functionName, + typeParameters.size() + ) ) ); } @@ -161,13 +160,11 @@ public void visitReturn(SReturn userReturnNode, SemanticScope semanticScope) { if (semanticScope.getReturnType() != void.class) { throw userReturnNode.createError( new ClassCastException( - "cannot cast from " - + "[" - + 
semanticScope.getReturnCanonicalTypeName() - + "] to " - + "[" - + PainlessLookupUtility.typeToCanonicalTypeName(void.class) - + "]" + Strings.format( + "cannot cast from [%s] to [%s]", + semanticScope.getReturnCanonicalTypeName(), + PainlessLookupUtility.typeToCanonicalTypeName(void.class) + ) ) ); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/PainlessSemanticHeaderPhase.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/PainlessSemanticHeaderPhase.java index ed5ca0a63f09..083f14d88b08 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/PainlessSemanticHeaderPhase.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/phase/PainlessSemanticHeaderPhase.java @@ -31,7 +31,7 @@ public void visitFunction(SFunction userFunctionNode, ScriptScope scriptScope) { if (functionTable.getFunction(functionKey) != null) { throw userFunctionNode.createError( - new IllegalArgumentException("invalid function definition: " + "found duplicate function [" + functionKey + "].") + new IllegalArgumentException("invalid function definition: found duplicate function [" + functionKey + "].") ); } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayTests.java index 4a93a4dc6866..5ab1a154ade5 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ArrayTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.painless; -import org.apache.lucene.util.Constants; import org.hamcrest.Matcher; import java.lang.invoke.MethodHandle; @@ -34,7 +33,6 @@ protected Matcher outOfBoundsExceptionMessageMatcher(int index, int size } public void testArrayLengthHelper() throws Throwable { - assertEquals(Constants.JRE_IS_MINIMUM_JAVA9, Def.JAVA9_ARRAY_LENGTH_MH_FACTORY != null); assertArrayLength(2, new int[2]); assertArrayLength(3, new long[3]); assertArrayLength(4, new byte[4]); @@ -81,4 +79,129 @@ public void testForLoop() { public void testDivideArray() { assertEquals(1, exec("def[] x = new def[1]; x[0] = 2; return x[0] / 2")); } + + public void testPrimitiveIteration() { + assertEquals(true, exec("def x = new boolean[] { true, false }; boolean s = false; for (boolean l : x) s |= l; return s")); + expectScriptThrows( + ClassCastException.class, + () -> exec("def x = new boolean[] { true, false }; short s = 0; for (short l : x) s += l; return s") + ); + expectScriptThrows( + ClassCastException.class, + () -> exec("def x = new boolean[] { true, false }; char s = 0; for (char l : x) s = l; return s") + ); + expectScriptThrows( + ClassCastException.class, + () -> exec("def x = new boolean[] { true, false }; int s = 0; for (int l : x) s += l; return s") + ); + expectScriptThrows( + ClassCastException.class, + () -> exec("def x = new boolean[] { true, false }; long s = 0; for (long l : x) s += l; return s") + ); + expectScriptThrows( + ClassCastException.class, + () -> exec("def x = new boolean[] { true, false }; float s = 0; for (float l : x) s += l; return s") + ); + expectScriptThrows( + ClassCastException.class, + () -> exec("def x = new boolean[] { true, false }; double s = 0; for (double l : x) s += l; return s") + ); + assertEquals(true, exec("def x = new boolean[] { true, false }; boolean s = false; for (def l : x) s |= l; return s")); + + assertEquals((byte) 30, exec("def x = new byte[] { (byte)10, (byte)20 }; 
byte s = 0; for (byte l : x) s += l; return s")); + assertEquals((short) 30, exec("def x = new byte[] { (byte)10, (byte)20 }; short s = 0; for (short l : x) s += l; return s")); + assertEquals((char) 20, exec("def x = new byte[] { (byte)10, (byte)20 }; char s = 0; for (char l : x) s = l; return s")); + assertEquals(30, exec("def x = new byte[] { (byte)10, (byte)20 }; int s = 0; for (int l : x) s += l; return s")); + assertEquals(30L, exec("def x = new byte[] { (byte)10, (byte)20 }; long s = 0; for (long l : x) s += l; return s")); + assertEquals(30f, exec("def x = new byte[] { (byte)10, (byte)20 }; float s = 0; for (float l : x) s += l; return s")); + assertEquals(30d, exec("def x = new byte[] { (byte)10, (byte)20 }; double s = 0; for (double l : x) s += l; return s")); + assertEquals((byte) 30, exec("def x = new byte[] { (byte)10, (byte)20 }; byte s = 0; for (def l : x) s += l; return s")); + + assertEquals((byte) 30, exec("def x = new short[] { (short)10, (short)20 }; byte s = 0; for (byte l : x) s += l; return s")); + assertEquals((short) 300, exec("def x = new short[] { (short)100, (short)200 }; short s = 0; for (short l : x) s += l; return s")); + assertEquals((char) 200, exec("def x = new short[] { (short)100, (short)200 }; char s = 0; for (char l : x) s = l; return s")); + assertEquals(300, exec("def x = new short[] { (short)100, (short)200 }; int s = 0; for (int l : x) s += l; return s")); + assertEquals(300L, exec("def x = new short[] { (short)100, (short)200 }; long s = 0; for (long l : x) s += l; return s")); + assertEquals(300f, exec("def x = new short[] { (short)100, (short)200 }; float s = 0; for (float l : x) s += l; return s")); + assertEquals(300d, exec("def x = new short[] { (short)100, (short)200 }; double s = 0; for (double l : x) s += l; return s")); + assertEquals((short) 300, exec("def x = new short[] { (short)100, (short)200 }; short s = 0; for (def l : x) s += l; return s")); + + assertEquals((byte) 'b', exec("def x = new char[] { (char)'a', (char)'b' }; byte s = 0; for (byte l : x) s = l; return s")); + assertEquals((short) 'b', exec("def x = new char[] { (char)'a', (char)'b' }; short s = 0; for (short l : x) s = l; return s")); + assertEquals('b', exec("def x = new char[] { (char)'a', (char)'b' }; char s = 0; for (char l : x) s = l; return s")); + assertEquals((int) 'b', exec("def x = new char[] { (char)'a', (char)'b' }; int s = 0; for (int l : x) s = l; return s")); + assertEquals((long) 'b', exec("def x = new char[] { (char)'a', (char)'b' }; long s = 0; for (long l : x) s = l; return s")); + assertEquals((float) 'b', exec("def x = new char[] { (char)'a', (char)'b' }; float s = 0; for (float l : x) s = l; return s")); + assertEquals((double) 'b', exec("def x = new char[] { (char)'a', (char)'b' }; double s = 0; for (double l : x) s = l; return s")); + assertEquals('b', exec("def x = new char[] { (char)'a', (char)'b' }; char s = 0; for (def l : x) s = l; return s")); + + assertEquals((byte) 30, exec("def x = new int[] { 10, 20 }; byte s = 0; for (byte l : x) s += l; return s")); + assertEquals((short) 300, exec("def x = new int[] { 100, 200 }; short s = 0; for (short l : x) s += l; return s")); + assertEquals((char) 200, exec("def x = new int[] { 100, 200 }; char s = 0; for (char l : x) s = l; return s")); + assertEquals(300, exec("def x = new int[] { 100, 200 }; int s = 0; for (int l : x) s += l; return s")); + assertEquals(300L, exec("def x = new int[] { 100, 200 }; long s = 0; for (long l : x) s += l; return s")); + assertEquals(300f, exec("def x = new int[] { 
100, 200 }; float s = 0; for (float l : x) s += l; return s")); + assertEquals(300d, exec("def x = new int[] { 100, 200 }; double s = 0; for (double l : x) s += l; return s")); + assertEquals(300, exec("def x = new int[] { 100, 200 }; int s = 0; for (def l : x) s += l; return s")); + + assertEquals((byte) 30, exec("def x = new long[] { 10, 20 }; byte s = 0; for (byte l : x) s += l; return s")); + assertEquals((short) 300, exec("def x = new long[] { 100, 200 }; short s = 0; for (short l : x) s += l; return s")); + assertEquals((char) 200, exec("def x = new long[] { 100, 200 }; char s = 0; for (char l : x) s = l; return s")); + assertEquals(300, exec("def x = new long[] { 100, 200 }; int s = 0; for (int l : x) s += l; return s")); + assertEquals(300L, exec("def x = new long[] { 100, 200 }; long s = 0; for (long l : x) s += l; return s")); + assertEquals(300f, exec("def x = new long[] { 100, 200 }; float s = 0; for (float l : x) s += l; return s")); + assertEquals(300d, exec("def x = new long[] { 100, 200 }; double s = 0; for (double l : x) s += l; return s")); + assertEquals(300L, exec("def x = new long[] { 100, 200 }; long s = 0; for (def l : x) s += l; return s")); + + assertEquals((byte) 30, exec("def x = new float[] { 10, 20 }; byte s = 0; for (byte l : x) s += l; return s")); + assertEquals((short) 300, exec("def x = new float[] { 100, 200 }; short s = 0; for (short l : x) s += l; return s")); + assertEquals((char) 200, exec("def x = new float[] { 100, 200 }; char s = 0; for (char l : x) s = l; return s")); + assertEquals(300, exec("def x = new float[] { 100, 200 }; int s = 0; for (int l : x) s += l; return s")); + assertEquals(300L, exec("def x = new float[] { 100, 200 }; long s = 0; for (long l : x) s += l; return s")); + assertEquals(300f, exec("def x = new float[] { 100, 200 }; float s = 0; for (float l : x) s += l; return s")); + assertEquals(300d, exec("def x = new float[] { 100, 200 }; double s = 0; for (double l : x) s += l; return s")); + assertEquals(300f, exec("def x = new float[] { 100, 200 }; float s = 0; for (def l : x) s += l; return s")); + + assertEquals((byte) 30, exec("def x = new double[] { 10, 20 }; byte s = 0; for (byte l : x) s += l; return s")); + assertEquals((short) 300, exec("def x = new double[] { 100, 200 }; short s = 0; for (short l : x) s += l; return s")); + assertEquals((char) 200, exec("def x = new double[] { 100, 200 }; char s = 0; for (char l : x) s = l; return s")); + assertEquals(300, exec("def x = new double[] { 100, 200 }; int s = 0; for (int l : x) s += l; return s")); + assertEquals(300L, exec("def x = new double[] { 100, 200 }; long s = 0; for (long l : x) s += l; return s")); + assertEquals(300f, exec("def x = new double[] { 100, 200 }; float s = 0; for (float l : x) s += l; return s")); + assertEquals(300d, exec("def x = new double[] { 100, 200 }; double s = 0; for (double l : x) s += l; return s")); + assertEquals(300d, exec("def x = new double[] { 100, 200 }; double s = 0; for (def l : x) s += l; return s")); + + expectScriptThrows( + ClassCastException.class, + () -> exec("def x = new String[] { 'foo', 'bar' }; boolean s = false; for (boolean l : x) s |= l; return s") + ); + expectScriptThrows( + ClassCastException.class, + () -> exec("def x = new String[] { 'foo', 'bar' }; byte s = 0; for (byte l : x) s += l; return s") + ); + expectScriptThrows( + ClassCastException.class, + () -> exec("def x = new String[] { 'foo', 'bar' }; short s = 0; for (short l : x) s += l; return s") + ); + expectScriptThrows( + ClassCastException.class, + () -> 
exec("def x = new String[] { 'foo', 'bar' }; char s = 0; for (char l : x) s = l; return s") + ); + expectScriptThrows( + ClassCastException.class, + () -> exec("def x = new String[] { 'foo', 'bar' }; int s = 0; for (int l : x) s += l; return s") + ); + expectScriptThrows( + ClassCastException.class, + () -> exec("def x = new String[] { 'foo', 'bar' }; long s = 0; for (long l : x) s += l; return s") + ); + expectScriptThrows( + ClassCastException.class, + () -> exec("def x = new String[] { 'foo', 'bar' }; float s = 0; for (float l : x) s += l; return s") + ); + expectScriptThrows( + ClassCastException.class, + () -> exec("def x = new String[] { 'foo', 'bar' }; double s = 0; for (double l : x) s += l; return s") + ); + } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ComparisonTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ComparisonTests.java index 89d1872e1ccc..b7bd562a3ee0 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ComparisonTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ComparisonTests.java @@ -11,6 +11,14 @@ public class ComparisonTests extends ScriptTestCase { public void testDefEq() { + assertEquals(true, exec("def x = (byte)7; def y = (char)7; return x == y")); + assertEquals(true, exec("def x = (short)6; def y = (char)6; return x == y")); + assertEquals(true, exec("def x = (char)5; def y = (char)5; return x == y")); + assertEquals(true, exec("def x = (int)4; def y = (char)4; return x == y")); + assertEquals(false, exec("def x = (long)5; def y = (char)3; return x == y")); + assertEquals(false, exec("def x = (float)6; def y = (char)2; return x == y")); + assertEquals(false, exec("def x = (double)7; def y = (char)1; return x == y")); + assertEquals(true, exec("def x = (byte)7; def y = (int)7; return x == y")); assertEquals(true, exec("def x = (short)6; def y = (int)6; return x == y")); assertEquals(true, exec("def x = (char)5; def y = (int)5; return x == y")); @@ -19,6 +27,22 @@ public void testDefEq() { assertEquals(false, exec("def x = (float)6; def y = (int)2; return x == y")); assertEquals(false, exec("def x = (double)7; def y = (int)1; return x == y")); + assertEquals(true, exec("def x = (byte)7; def y = (long)7; return x == y")); + assertEquals(true, exec("def x = (short)6; def y = (long)6; return x == y")); + assertEquals(true, exec("def x = (char)5; def y = (long)5; return x == y")); + assertEquals(true, exec("def x = (int)4; def y = (long)4; return x == y")); + assertEquals(false, exec("def x = (long)5; def y = (long)3; return x == y")); + assertEquals(false, exec("def x = (float)6; def y = (long)2; return x == y")); + assertEquals(false, exec("def x = (double)7; def y = (long)1; return x == y")); + + assertEquals(true, exec("def x = (byte)7; def y = (float)7; return x == y")); + assertEquals(true, exec("def x = (short)6; def y = (float)6; return x == y")); + assertEquals(true, exec("def x = (char)5; def y = (float)5; return x == y")); + assertEquals(true, exec("def x = (int)4; def y = (float)4; return x == y")); + assertEquals(false, exec("def x = (long)5; def y = (float)3; return x == y")); + assertEquals(false, exec("def x = (float)6; def y = (float)2; return x == y")); + assertEquals(false, exec("def x = (double)7; def y = (float)1; return x == y")); + assertEquals(true, exec("def x = (byte)7; def y = (double)7; return x == y")); assertEquals(true, exec("def x = (short)6; def y = (double)6; return x == y")); assertEquals(true, exec("def x = (char)5; 
def y = (double)5; return x == y")); @@ -41,6 +65,14 @@ public void testDefEq() { } public void testDefEqTypedLHS() { + assertEquals(true, exec("byte x = (byte)7; def y = (char)7; return x == y")); + assertEquals(true, exec("short x = (short)6; def y = (char)6; return x == y")); + assertEquals(true, exec("char x = (char)5; def y = (char)5; return x == y")); + assertEquals(true, exec("int x = (int)4; def y = (char)4; return x == y")); + assertEquals(false, exec("long x = (long)5; def y = (char)3; return x == y")); + assertEquals(false, exec("float x = (float)6; def y = (char)2; return x == y")); + assertEquals(false, exec("double x = (double)7; def y = (char)1; return x == y")); + assertEquals(true, exec("byte x = (byte)7; def y = (int)7; return x == y")); assertEquals(true, exec("short x = (short)6; def y = (int)6; return x == y")); assertEquals(true, exec("char x = (char)5; def y = (int)5; return x == y")); @@ -49,6 +81,22 @@ public void testDefEqTypedLHS() { assertEquals(false, exec("float x = (float)6; def y = (int)2; return x == y")); assertEquals(false, exec("double x = (double)7; def y = (int)1; return x == y")); + assertEquals(true, exec("byte x = (byte)7; def y = (long)7; return x == y")); + assertEquals(true, exec("short x = (short)6; def y = (long)6; return x == y")); + assertEquals(true, exec("char x = (char)5; def y = (long)5; return x == y")); + assertEquals(true, exec("int x = (int)4; def y = (long)4; return x == y")); + assertEquals(false, exec("long x = (long)5; def y = (long)3; return x == y")); + assertEquals(false, exec("float x = (float)6; def y = (long)2; return x == y")); + assertEquals(false, exec("double x = (double)7; def y = (long)1; return x == y")); + + assertEquals(true, exec("byte x = (byte)7; def y = (float)7; return x == y")); + assertEquals(true, exec("short x = (short)6; def y = (float)6; return x == y")); + assertEquals(true, exec("char x = (char)5; def y = (float)5; return x == y")); + assertEquals(true, exec("int x = (int)4; def y = (float)4; return x == y")); + assertEquals(false, exec("long x = (long)5; def y = (float)3; return x == y")); + assertEquals(false, exec("float x = (float)6; def y = (float)2; return x == y")); + assertEquals(false, exec("double x = (double)7; def y = (float)1; return x == y")); + assertEquals(true, exec("byte x = (byte)7; def y = (double)7; return x == y")); assertEquals(true, exec("short x = (short)6; def y = (double)6; return x == y")); assertEquals(true, exec("char x = (char)5; def y = (double)5; return x == y")); @@ -70,6 +118,14 @@ public void testDefEqTypedLHS() { } public void testDefEqTypedRHS() { + assertEquals(true, exec("def x = (byte)7; char y = (char)7; return x == y")); + assertEquals(true, exec("def x = (short)6; char y = (char)6; return x == y")); + assertEquals(true, exec("def x = (char)5; char y = (char)5; return x == y")); + assertEquals(true, exec("def x = (int)4; char y = (char)4; return x == y")); + assertEquals(false, exec("def x = (long)5; char y = (char)3; return x == y")); + assertEquals(false, exec("def x = (float)6; char y = (char)2; return x == y")); + assertEquals(false, exec("def x = (double)7; char y = (char)1; return x == y")); + assertEquals(true, exec("def x = (byte)7; int y = (int)7; return x == y")); assertEquals(true, exec("def x = (short)6; int y = (int)6; return x == y")); assertEquals(true, exec("def x = (char)5; int y = (int)5; return x == y")); @@ -78,6 +134,22 @@ public void testDefEqTypedRHS() { assertEquals(false, exec("def x = (float)6; int y = (int)2; return x == y")); 
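Aside: the def comparison tests being added throughout this file pin down that == and != on numeric def operands compare by value after numeric promotion, so (byte)7 == (char)7 and (byte)7 == (long)7 both evaluate to true even though the boxed runtime types differ. A loose, hypothetical illustration of value-versus-boxed comparison, not the actual Def runtime dispatch:

    class DefEqSketch {
        // Numeric equality after promotion, in the spirit of what the tests above assert.
        static boolean numericEq(Number a, Number b) {
            if (a instanceof Double || a instanceof Float || b instanceof Double || b instanceof Float) {
                return a.doubleValue() == b.doubleValue();
            }
            return a.longValue() == b.longValue();
        }

        public static void main(String[] args) {
            System.out.println(numericEq((byte) 7, (long) 7));                   // true: promoted, then compared
            System.out.println(Byte.valueOf((byte) 7).equals(Long.valueOf(7L))); // false: boxed equals never promotes
        }
    }
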
assertEquals(false, exec("def x = (double)7; int y = (int)1; return x == y")); + assertEquals(true, exec("def x = (byte)7; long y = (long)7; return x == y")); + assertEquals(true, exec("def x = (short)6; long y = (long)6; return x == y")); + assertEquals(true, exec("def x = (char)5; long y = (long)5; return x == y")); + assertEquals(true, exec("def x = (int)4; long y = (long)4; return x == y")); + assertEquals(false, exec("def x = (long)5; long y = (long)3; return x == y")); + assertEquals(false, exec("def x = (float)6; long y = (long)2; return x == y")); + assertEquals(false, exec("def x = (double)7; long y = (long)1; return x == y")); + + assertEquals(true, exec("def x = (byte)7; float y = (float)7; return x == y")); + assertEquals(true, exec("def x = (short)6; float y = (float)6; return x == y")); + assertEquals(true, exec("def x = (char)5; float y = (float)5; return x == y")); + assertEquals(true, exec("def x = (int)4; float y = (float)4; return x == y")); + assertEquals(false, exec("def x = (long)5; float y = (float)3; return x == y")); + assertEquals(false, exec("def x = (float)6; float y = (float)2; return x == y")); + assertEquals(false, exec("def x = (double)7; float y = (float)1; return x == y")); + assertEquals(true, exec("def x = (byte)7; double y = (double)7; return x == y")); assertEquals(true, exec("def x = (short)6; double y = (double)6; return x == y")); assertEquals(true, exec("def x = (char)5; double y = (double)5; return x == y")); @@ -106,6 +178,7 @@ public void testDefEqr() { assertEquals(false, exec("def x = (long)5; def y = (int)3; return x === y")); assertEquals(false, exec("def x = (float)6; def y = (int)2; return x === y")); assertEquals(false, exec("def x = (double)7; def y = (int)1; return x === y")); + assertEquals(false, exec("def x = false; def y = true; return x === y")); assertEquals(false, exec("def x = new HashMap(); def y = new HashMap(); return x === y")); @@ -115,6 +188,14 @@ public void testDefEqr() { } public void testDefNe() { + assertEquals(false, exec("def x = (byte)7; def y = (char)7; return x != y")); + assertEquals(false, exec("def x = (short)6; def y = (char)6; return x != y")); + assertEquals(false, exec("def x = (char)5; def y = (char)5; return x != y")); + assertEquals(false, exec("def x = (int)4; def y = (char)4; return x != y")); + assertEquals(true, exec("def x = (long)5; def y = (char)3; return x != y")); + assertEquals(true, exec("def x = (float)6; def y = (char)2; return x != y")); + assertEquals(true, exec("def x = (double)7; def y = (char)1; return x != y")); + assertEquals(false, exec("def x = (byte)7; def y = (int)7; return x != y")); assertEquals(false, exec("def x = (short)6; def y = (int)6; return x != y")); assertEquals(false, exec("def x = (char)5; def y = (int)5; return x != y")); @@ -123,6 +204,22 @@ public void testDefNe() { assertEquals(true, exec("def x = (float)6; def y = (int)2; return x != y")); assertEquals(true, exec("def x = (double)7; def y = (int)1; return x != y")); + assertEquals(false, exec("def x = (byte)7; def y = (long)7; return x != y")); + assertEquals(false, exec("def x = (short)6; def y = (long)6; return x != y")); + assertEquals(false, exec("def x = (char)5; def y = (long)5; return x != y")); + assertEquals(false, exec("def x = (int)4; def y = (long)4; return x != y")); + assertEquals(true, exec("def x = (long)5; def y = (long)3; return x != y")); + assertEquals(true, exec("def x = (float)6; def y = (long)2; return x != y")); + assertEquals(true, exec("def x = (double)7; def y = (long)1; return x != 
y")); + + assertEquals(false, exec("def x = (byte)7; def y = (float)7; return x != y")); + assertEquals(false, exec("def x = (short)6; def y = (float)6; return x != y")); + assertEquals(false, exec("def x = (char)5; def y = (float)5; return x != y")); + assertEquals(false, exec("def x = (int)4; def y = (float)4; return x != y")); + assertEquals(true, exec("def x = (long)5; def y = (float)3; return x != y")); + assertEquals(true, exec("def x = (float)6; def y = (float)2; return x != y")); + assertEquals(true, exec("def x = (double)7; def y = (float)1; return x != y")); + assertEquals(false, exec("def x = (byte)7; def y = (double)7; return x != y")); assertEquals(false, exec("def x = (short)6; def y = (double)6; return x != y")); assertEquals(false, exec("def x = (char)5; def y = (double)5; return x != y")); @@ -143,6 +240,14 @@ public void testDefNe() { } public void testDefNeTypedLHS() { + assertEquals(false, exec("byte x = (byte)7; def y = (char)7; return x != y")); + assertEquals(false, exec("short x = (short)6; def y = (char)6; return x != y")); + assertEquals(false, exec("char x = (char)5; def y = (char)5; return x != y")); + assertEquals(false, exec("int x = (int)4; def y = (char)4; return x != y")); + assertEquals(true, exec("long x = (long)5; def y = (char)3; return x != y")); + assertEquals(true, exec("float x = (float)6; def y = (char)2; return x != y")); + assertEquals(true, exec("double x = (double)7; def y = (char)1; return x != y")); + assertEquals(false, exec("byte x = (byte)7; def y = (int)7; return x != y")); assertEquals(false, exec("short x = (short)6; def y = (int)6; return x != y")); assertEquals(false, exec("char x = (char)5; def y = (int)5; return x != y")); @@ -151,6 +256,22 @@ public void testDefNeTypedLHS() { assertEquals(true, exec("float x = (float)6; def y = (int)2; return x != y")); assertEquals(true, exec("double x = (double)7; def y = (int)1; return x != y")); + assertEquals(false, exec("byte x = (byte)7; def y = (long)7; return x != y")); + assertEquals(false, exec("short x = (short)6; def y = (long)6; return x != y")); + assertEquals(false, exec("char x = (char)5; def y = (long)5; return x != y")); + assertEquals(false, exec("int x = (int)4; def y = (long)4; return x != y")); + assertEquals(true, exec("long x = (long)5; def y = (long)3; return x != y")); + assertEquals(true, exec("float x = (float)6; def y = (long)2; return x != y")); + assertEquals(true, exec("double x = (double)7; def y = (long)1; return x != y")); + + assertEquals(false, exec("byte x = (byte)7; def y = (float)7; return x != y")); + assertEquals(false, exec("short x = (short)6; def y = (float)6; return x != y")); + assertEquals(false, exec("char x = (char)5; def y = (float)5; return x != y")); + assertEquals(false, exec("int x = (int)4; def y = (float)4; return x != y")); + assertEquals(true, exec("long x = (long)5; def y = (float)3; return x != y")); + assertEquals(true, exec("float x = (float)6; def y = (float)2; return x != y")); + assertEquals(true, exec("double x = (double)7; def y = (float)1; return x != y")); + assertEquals(false, exec("byte x = (byte)7; def y = (double)7; return x != y")); assertEquals(false, exec("short x = (short)6; def y = (double)6; return x != y")); assertEquals(false, exec("char x = (char)5; def y = (double)5; return x != y")); @@ -171,6 +292,14 @@ public void testDefNeTypedLHS() { } public void testDefNeTypedRHS() { + assertEquals(false, exec("def x = (byte)7; char y = (char)7; return x != y")); + assertEquals(false, exec("def x = (short)6; char y = 
(char)6; return x != y")); + assertEquals(false, exec("def x = (char)5; char y = (char)5; return x != y")); + assertEquals(false, exec("def x = (int)4; char y = (char)4; return x != y")); + assertEquals(true, exec("def x = (long)5; char y = (char)3; return x != y")); + assertEquals(true, exec("def x = (float)6; char y = (char)2; return x != y")); + assertEquals(true, exec("def x = (double)7; char y = (char)1; return x != y")); + assertEquals(false, exec("def x = (byte)7; int y = (int)7; return x != y")); assertEquals(false, exec("def x = (short)6; int y = (int)6; return x != y")); assertEquals(false, exec("def x = (char)5; int y = (int)5; return x != y")); @@ -179,6 +308,22 @@ public void testDefNeTypedRHS() { assertEquals(true, exec("def x = (float)6; int y = (int)2; return x != y")); assertEquals(true, exec("def x = (double)7; int y = (int)1; return x != y")); + assertEquals(false, exec("def x = (byte)7; long y = (long)7; return x != y")); + assertEquals(false, exec("def x = (short)6; long y = (long)6; return x != y")); + assertEquals(false, exec("def x = (char)5; long y = (long)5; return x != y")); + assertEquals(false, exec("def x = (int)4; long y = (long)4; return x != y")); + assertEquals(true, exec("def x = (long)5; long y = (long)3; return x != y")); + assertEquals(true, exec("def x = (float)6; long y = (long)2; return x != y")); + assertEquals(true, exec("def x = (double)7; long y = (long)1; return x != y")); + + assertEquals(false, exec("def x = (byte)7; float y = (float)7; return x != y")); + assertEquals(false, exec("def x = (short)6; float y = (float)6; return x != y")); + assertEquals(false, exec("def x = (char)5; float y = (float)5; return x != y")); + assertEquals(false, exec("def x = (int)4; float y = (float)4; return x != y")); + assertEquals(true, exec("def x = (long)5; float y = (float)3; return x != y")); + assertEquals(true, exec("def x = (float)6; float y = (float)2; return x != y")); + assertEquals(true, exec("def x = (double)7; float y = (float)1; return x != y")); + assertEquals(false, exec("def x = (byte)7; double y = (double)7; return x != y")); assertEquals(false, exec("def x = (short)6; double y = (double)6; return x != y")); assertEquals(false, exec("def x = (char)5; double y = (double)5; return x != y")); @@ -222,6 +367,22 @@ public void testDefLt() { assertEquals(false, exec("def x = (float)6; def y = (int)2; return x < y")); assertEquals(false, exec("def x = (double)7; def y = (int)1; return x < y")); + assertEquals(true, exec("def x = (byte)1; def y = (long)7; return x < y")); + assertEquals(true, exec("def x = (short)2; def y = (long)6; return x < y")); + assertEquals(true, exec("def x = (char)3; def y = (long)5; return x < y")); + assertEquals(false, exec("def x = (int)4; def y = (long)4; return x < y")); + assertEquals(false, exec("def x = (long)5; def y = (long)3; return x < y")); + assertEquals(false, exec("def x = (float)6; def y = (long)2; return x < y")); + assertEquals(false, exec("def x = (double)7; def y = (long)1; return x < y")); + + assertEquals(true, exec("def x = (byte)1; def y = (float)7; return x < y")); + assertEquals(true, exec("def x = (short)2; def y = (float)6; return x < y")); + assertEquals(true, exec("def x = (char)3; def y = (float)5; return x < y")); + assertEquals(false, exec("def x = (int)4; def y = (float)4; return x < y")); + assertEquals(false, exec("def x = (long)5; def y = (float)3; return x < y")); + assertEquals(false, exec("def x = (float)6; def y = (float)2; return x < y")); + assertEquals(false, exec("def x = 
(double)7; def y = (float)1; return x < y")); + assertEquals(true, exec("def x = (byte)1; def y = (double)7; return x < y")); assertEquals(true, exec("def x = (short)2; def y = (double)6; return x < y")); assertEquals(true, exec("def x = (char)3; def y = (double)5; return x < y")); @@ -240,6 +401,22 @@ public void testDefLtTypedLHS() { assertEquals(false, exec("float x = (float)6; def y = (int)2; return x < y")); assertEquals(false, exec("double x = (double)7; def y = (int)1; return x < y")); + assertEquals(true, exec("byte x = (byte)1; def y = (long)7; return x < y")); + assertEquals(true, exec("short x = (short)2; def y = (long)6; return x < y")); + assertEquals(true, exec("char x = (char)3; def y = (long)5; return x < y")); + assertEquals(false, exec("int x = (int)4; def y = (long)4; return x < y")); + assertEquals(false, exec("long x = (long)5; def y = (long)3; return x < y")); + assertEquals(false, exec("float x = (float)6; def y = (long)2; return x < y")); + assertEquals(false, exec("double x = (double)7; def y = (long)1; return x < y")); + + assertEquals(true, exec("byte x = (byte)1; def y = (float)7; return x < y")); + assertEquals(true, exec("short x = (short)2; def y = (float)6; return x < y")); + assertEquals(true, exec("char x = (char)3; def y = (float)5; return x < y")); + assertEquals(false, exec("int x = (int)4; def y = (float)4; return x < y")); + assertEquals(false, exec("long x = (long)5; def y = (float)3; return x < y")); + assertEquals(false, exec("float x = (float)6; def y = (float)2; return x < y")); + assertEquals(false, exec("double x = (double)7; def y = (float)1; return x < y")); + assertEquals(true, exec("byte x = (byte)1; def y = (double)7; return x < y")); assertEquals(true, exec("short x = (short)2; def y = (double)6; return x < y")); assertEquals(true, exec("char x = (char)3; def y = (double)5; return x < y")); @@ -258,6 +435,22 @@ public void testDefLtTypedRHS() { assertEquals(false, exec("def x = (float)6; int y = (int)2; return x < y")); assertEquals(false, exec("def x = (double)7; int y = (int)1; return x < y")); + assertEquals(true, exec("def x = (byte)1; long y = (long)7; return x < y")); + assertEquals(true, exec("def x = (short)2; long y = (long)6; return x < y")); + assertEquals(true, exec("def x = (char)3; long y = (long)5; return x < y")); + assertEquals(false, exec("def x = (int)4; long y = (long)4; return x < y")); + assertEquals(false, exec("def x = (long)5; long y = (long)3; return x < y")); + assertEquals(false, exec("def x = (float)6; long y = (long)2; return x < y")); + assertEquals(false, exec("def x = (double)7; long y = (long)1; return x < y")); + + assertEquals(true, exec("def x = (byte)1; float y = (float)7; return x < y")); + assertEquals(true, exec("def x = (short)2; float y = (float)6; return x < y")); + assertEquals(true, exec("def x = (char)3; float y = (float)5; return x < y")); + assertEquals(false, exec("def x = (int)4; float y = (float)4; return x < y")); + assertEquals(false, exec("def x = (long)5; float y = (float)3; return x < y")); + assertEquals(false, exec("def x = (float)6; float y = (float)2; return x < y")); + assertEquals(false, exec("def x = (double)7; float y = (float)1; return x < y")); + assertEquals(true, exec("def x = (byte)1; double y = (double)7; return x < y")); assertEquals(true, exec("def x = (short)2; double y = (double)6; return x < y")); assertEquals(true, exec("def x = (char)3; double y = (double)5; return x < y")); @@ -276,6 +469,22 @@ public void testDefLte() { assertEquals(false, exec("def x = 
(float)6; def y = (int)2; return x <= y")); assertEquals(false, exec("def x = (double)7; def y = (int)1; return x <= y")); + assertEquals(true, exec("def x = (byte)1; def y = (long)7; return x <= y")); + assertEquals(true, exec("def x = (short)2; def y = (long)6; return x <= y")); + assertEquals(true, exec("def x = (char)3; def y = (long)5; return x <= y")); + assertEquals(true, exec("def x = (int)4; def y = (long)4; return x <= y")); + assertEquals(false, exec("def x = (long)5; def y = (long)3; return x <= y")); + assertEquals(false, exec("def x = (float)6; def y = (long)2; return x <= y")); + assertEquals(false, exec("def x = (double)7; def y = (long)1; return x <= y")); + + assertEquals(true, exec("def x = (byte)1; def y = (float)7; return x <= y")); + assertEquals(true, exec("def x = (short)2; def y = (float)6; return x <= y")); + assertEquals(true, exec("def x = (char)3; def y = (float)5; return x <= y")); + assertEquals(true, exec("def x = (int)4; def y = (float)4; return x <= y")); + assertEquals(false, exec("def x = (long)5; def y = (float)3; return x <= y")); + assertEquals(false, exec("def x = (float)6; def y = (float)2; return x <= y")); + assertEquals(false, exec("def x = (double)7; def y = (float)1; return x <= y")); + assertEquals(true, exec("def x = (byte)1; def y = (double)7; return x <= y")); assertEquals(true, exec("def x = (short)2; def y = (double)6; return x <= y")); assertEquals(true, exec("def x = (char)3; def y = (double)5; return x <= y")); @@ -294,6 +503,22 @@ public void testDefLteTypedLHS() { assertEquals(false, exec("float x = (float)6; def y = (int)2; return x <= y")); assertEquals(false, exec("double x = (double)7; def y = (int)1; return x <= y")); + assertEquals(true, exec("byte x = (byte)1; def y = (long)7; return x <= y")); + assertEquals(true, exec("short x = (short)2; def y = (long)6; return x <= y")); + assertEquals(true, exec("char x = (char)3; def y = (long)5; return x <= y")); + assertEquals(true, exec("int x = (int)4; def y = (long)4; return x <= y")); + assertEquals(false, exec("long x = (long)5; def y = (long)3; return x <= y")); + assertEquals(false, exec("float x = (float)6; def y = (long)2; return x <= y")); + assertEquals(false, exec("double x = (double)7; def y = (long)1; return x <= y")); + + assertEquals(true, exec("byte x = (byte)1; def y = (float)7; return x <= y")); + assertEquals(true, exec("short x = (short)2; def y = (float)6; return x <= y")); + assertEquals(true, exec("char x = (char)3; def y = (float)5; return x <= y")); + assertEquals(true, exec("int x = (int)4; def y = (float)4; return x <= y")); + assertEquals(false, exec("long x = (long)5; def y = (float)3; return x <= y")); + assertEquals(false, exec("float x = (float)6; def y = (float)2; return x <= y")); + assertEquals(false, exec("double x = (double)7; def y = (float)1; return x <= y")); + assertEquals(true, exec("byte x = (byte)1; def y = (double)7; return x <= y")); assertEquals(true, exec("short x = (short)2; def y = (double)6; return x <= y")); assertEquals(true, exec("char x = (char)3; def y = (double)5; return x <= y")); @@ -312,6 +537,22 @@ public void testDefLteTypedRHS() { assertEquals(false, exec("def x = (float)6; int y = (int)2; return x <= y")); assertEquals(false, exec("def x = (double)7; int y = (int)1; return x <= y")); + assertEquals(true, exec("def x = (byte)1; long y = (long)7; return x <= y")); + assertEquals(true, exec("def x = (short)2; long y = (long)6; return x <= y")); + assertEquals(true, exec("def x = (char)3; long y = (long)5; return x <= y")); + 
assertEquals(true, exec("def x = (int)4; long y = (long)4; return x <= y")); + assertEquals(false, exec("def x = (long)5; long y = (long)3; return x <= y")); + assertEquals(false, exec("def x = (float)6; long y = (long)2; return x <= y")); + assertEquals(false, exec("def x = (double)7; long y = (long)1; return x <= y")); + + assertEquals(true, exec("def x = (byte)1; float y = (float)7; return x <= y")); + assertEquals(true, exec("def x = (short)2; float y = (float)6; return x <= y")); + assertEquals(true, exec("def x = (char)3; float y = (float)5; return x <= y")); + assertEquals(true, exec("def x = (int)4; float y = (float)4; return x <= y")); + assertEquals(false, exec("def x = (long)5; float y = (float)3; return x <= y")); + assertEquals(false, exec("def x = (float)6; float y = (float)2; return x <= y")); + assertEquals(false, exec("def x = (double)7; float y = (float)1; return x <= y")); + assertEquals(true, exec("def x = (byte)1; double y = (double)7; return x <= y")); assertEquals(true, exec("def x = (short)2; double y = (double)6; return x <= y")); assertEquals(true, exec("def x = (char)3; double y = (double)5; return x <= y")); @@ -330,6 +571,22 @@ public void testDefGt() { assertEquals(true, exec("def x = (float)6; def y = (int)2; return x > y")); assertEquals(true, exec("def x = (double)7; def y = (int)1; return x > y")); + assertEquals(false, exec("def x = (byte)1; def y = (long)7; return x > y")); + assertEquals(false, exec("def x = (short)2; def y = (long)6; return x > y")); + assertEquals(false, exec("def x = (char)3; def y = (long)5; return x > y")); + assertEquals(false, exec("def x = (int)4; def y = (long)4; return x > y")); + assertEquals(true, exec("def x = (long)5; def y = (long)3; return x > y")); + assertEquals(true, exec("def x = (float)6; def y = (long)2; return x > y")); + assertEquals(true, exec("def x = (double)7; def y = (long)1; return x > y")); + + assertEquals(false, exec("def x = (byte)1; def y = (float)7; return x > y")); + assertEquals(false, exec("def x = (short)2; def y = (float)6; return x > y")); + assertEquals(false, exec("def x = (char)3; def y = (float)5; return x > y")); + assertEquals(false, exec("def x = (int)4; def y = (float)4; return x > y")); + assertEquals(true, exec("def x = (long)5; def y = (float)3; return x > y")); + assertEquals(true, exec("def x = (float)6; def y = (float)2; return x > y")); + assertEquals(true, exec("def x = (double)7; def y = (float)1; return x > y")); + assertEquals(false, exec("def x = (byte)1; def y = (double)7; return x > y")); assertEquals(false, exec("def x = (short)2; def y = (double)6; return x > y")); assertEquals(false, exec("def x = (char)3; def y = (double)5; return x > y")); @@ -348,6 +605,22 @@ public void testDefGtTypedLHS() { assertEquals(true, exec("float x = (float)6; def y = (int)2; return x > y")); assertEquals(true, exec("double x = (double)7; def y = (int)1; return x > y")); + assertEquals(false, exec("byte x = (byte)1; def y = (long)7; return x > y")); + assertEquals(false, exec("short x = (short)2; def y = (long)6; return x > y")); + assertEquals(false, exec("char x = (char)3; def y = (long)5; return x > y")); + assertEquals(false, exec("int x = (int)4; def y = (long)4; return x > y")); + assertEquals(true, exec("long x = (long)5; def y = (long)3; return x > y")); + assertEquals(true, exec("float x = (float)6; def y = (long)2; return x > y")); + assertEquals(true, exec("double x = (double)7; def y = (long)1; return x > y")); + + assertEquals(false, exec("byte x = (byte)1; def y = (float)7; 
return x > y")); + assertEquals(false, exec("short x = (short)2; def y = (float)6; return x > y")); + assertEquals(false, exec("char x = (char)3; def y = (float)5; return x > y")); + assertEquals(false, exec("int x = (int)4; def y = (float)4; return x > y")); + assertEquals(true, exec("long x = (long)5; def y = (float)3; return x > y")); + assertEquals(true, exec("float x = (float)6; def y = (float)2; return x > y")); + assertEquals(true, exec("double x = (double)7; def y = (float)1; return x > y")); + assertEquals(false, exec("byte x = (byte)1; def y = (double)7; return x > y")); assertEquals(false, exec("short x = (short)2; def y = (double)6; return x > y")); assertEquals(false, exec("char x = (char)3; def y = (double)5; return x > y")); @@ -366,6 +639,22 @@ public void testDefGtTypedRHS() { assertEquals(true, exec("def x = (float)6; int y = (int)2; return x > y")); assertEquals(true, exec("def x = (double)7; int y = (int)1; return x > y")); + assertEquals(false, exec("def x = (byte)1; long y = (long)7; return x > y")); + assertEquals(false, exec("def x = (short)2; long y = (long)6; return x > y")); + assertEquals(false, exec("def x = (char)3; long y = (long)5; return x > y")); + assertEquals(false, exec("def x = (int)4; long y = (long)4; return x > y")); + assertEquals(true, exec("def x = (long)5; long y = (long)3; return x > y")); + assertEquals(true, exec("def x = (float)6; long y = (long)2; return x > y")); + assertEquals(true, exec("def x = (double)7; long y = (long)1; return x > y")); + + assertEquals(false, exec("def x = (byte)1; float y = (float)7; return x > y")); + assertEquals(false, exec("def x = (short)2; float y = (float)6; return x > y")); + assertEquals(false, exec("def x = (char)3; float y = (float)5; return x > y")); + assertEquals(false, exec("def x = (int)4; float y = (float)4; return x > y")); + assertEquals(true, exec("def x = (long)5; float y = (float)3; return x > y")); + assertEquals(true, exec("def x = (float)6; float y = (float)2; return x > y")); + assertEquals(true, exec("def x = (double)7; float y = (float)1; return x > y")); + assertEquals(false, exec("def x = (byte)1; double y = (double)7; return x > y")); assertEquals(false, exec("def x = (short)2; double y = (double)6; return x > y")); assertEquals(false, exec("def x = (char)3; double y = (double)5; return x > y")); @@ -384,6 +673,22 @@ public void testDefGte() { assertEquals(true, exec("def x = (float)6; def y = (int)2; return x >= y")); assertEquals(true, exec("def x = (double)7; def y = (int)1; return x >= y")); + assertEquals(false, exec("def x = (byte)1; def y = (long)7; return x >= y")); + assertEquals(false, exec("def x = (short)2; def y = (long)6; return x >= y")); + assertEquals(false, exec("def x = (char)3; def y = (long)5; return x >= y")); + assertEquals(true, exec("def x = (int)4; def y = (long)4; return x >= y")); + assertEquals(true, exec("def x = (long)5; def y = (long)3; return x >= y")); + assertEquals(true, exec("def x = (float)6; def y = (long)2; return x >= y")); + assertEquals(true, exec("def x = (double)7; def y = (long)1; return x >= y")); + + assertEquals(false, exec("def x = (byte)1; def y = (float)7; return x >= y")); + assertEquals(false, exec("def x = (short)2; def y = (float)6; return x >= y")); + assertEquals(false, exec("def x = (char)3; def y = (float)5; return x >= y")); + assertEquals(true, exec("def x = (int)4; def y = (float)4; return x >= y")); + assertEquals(true, exec("def x = (long)5; def y = (float)3; return x >= y")); + assertEquals(true, exec("def x = (float)6; 
def y = (float)2; return x >= y")); + assertEquals(true, exec("def x = (double)7; def y = (float)1; return x >= y")); + assertEquals(false, exec("def x = (byte)1; def y = (double)7; return x >= y")); assertEquals(false, exec("def x = (short)2; def y = (double)6; return x >= y")); assertEquals(false, exec("def x = (char)3; def y = (double)5; return x >= y")); @@ -402,6 +707,22 @@ public void testDefGteTypedLHS() { assertEquals(true, exec("float x = (float)6; def y = (int)2; return x >= y")); assertEquals(true, exec("double x = (double)7; def y = (int)1; return x >= y")); + assertEquals(false, exec("byte x = (byte)1; def y = (long)7; return x >= y")); + assertEquals(false, exec("short x = (short)2; def y = (long)6; return x >= y")); + assertEquals(false, exec("char x = (char)3; def y = (long)5; return x >= y")); + assertEquals(true, exec("int x = (int)4; def y = (long)4; return x >= y")); + assertEquals(true, exec("long x = (long)5; def y = (long)3; return x >= y")); + assertEquals(true, exec("float x = (float)6; def y = (long)2; return x >= y")); + assertEquals(true, exec("double x = (double)7; def y = (long)1; return x >= y")); + + assertEquals(false, exec("byte x = (byte)1; def y = (float)7; return x >= y")); + assertEquals(false, exec("short x = (short)2; def y = (float)6; return x >= y")); + assertEquals(false, exec("char x = (char)3; def y = (float)5; return x >= y")); + assertEquals(true, exec("int x = (int)4; def y = (float)4; return x >= y")); + assertEquals(true, exec("long x = (long)5; def y = (float)3; return x >= y")); + assertEquals(true, exec("float x = (float)6; def y = (float)2; return x >= y")); + assertEquals(true, exec("double x = (double)7; def y = (float)1; return x >= y")); + assertEquals(false, exec("byte x = (byte)1; def y = (double)7; return x >= y")); assertEquals(false, exec("short x = (short)2; def y = (double)6; return x >= y")); assertEquals(false, exec("char x = (char)3; def y = (double)5; return x >= y")); @@ -420,6 +741,22 @@ public void testDefGteTypedRHS() { assertEquals(true, exec("def x = (float)6; int y = (int)2; return x >= y")); assertEquals(true, exec("def x = (double)7; int y = (int)1; return x >= y")); + assertEquals(false, exec("def x = (byte)1; long y = (long)7; return x >= y")); + assertEquals(false, exec("def x = (short)2; long y = (long)6; return x >= y")); + assertEquals(false, exec("def x = (char)3; long y = (long)5; return x >= y")); + assertEquals(true, exec("def x = (int)4; long y = (long)4; return x >= y")); + assertEquals(true, exec("def x = (long)5; long y = (long)3; return x >= y")); + assertEquals(true, exec("def x = (float)6; long y = (long)2; return x >= y")); + assertEquals(true, exec("def x = (double)7; long y = (long)1; return x >= y")); + + assertEquals(false, exec("def x = (byte)1; float y = (float)7; return x >= y")); + assertEquals(false, exec("def x = (short)2; float y = (float)6; return x >= y")); + assertEquals(false, exec("def x = (char)3; float y = (float)5; return x >= y")); + assertEquals(true, exec("def x = (int)4; float y = (float)4; return x >= y")); + assertEquals(true, exec("def x = (long)5; float y = (float)3; return x >= y")); + assertEquals(true, exec("def x = (float)6; float y = (float)2; return x >= y")); + assertEquals(true, exec("def x = (double)7; float y = (float)1; return x >= y")); + assertEquals(false, exec("def x = (byte)1; double y = (double)7; return x >= y")); assertEquals(false, exec("def x = (short)2; double y = (double)6; return x >= y")); assertEquals(false, exec("def x = (char)3; double y = 
(double)5; return x >= y")); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefOptimizationTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefOptimizationTests.java index 5973cf8b3728..ffbe91bd4603 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefOptimizationTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefOptimizationTests.java @@ -416,4 +416,39 @@ public void testLambdaArguments() { "synthetic lambda$synthetic$0(D)D" ); } + + public void testPrimitiveArrayIteration() { + assertBytecodeExists( + "def x = new boolean[] { true, false }; boolean s = false; for (boolean l : x) s |= l; return s", + "INVOKEINTERFACE org/elasticsearch/painless/api/ValueIterator.nextBoolean ()Z" + ); + assertBytecodeExists( + "def x = new byte[] { (byte)10, (byte)20 }; byte s = 0; for (byte l : x) s += l; return s", + "INVOKEINTERFACE org/elasticsearch/painless/api/ValueIterator.nextByte ()B" + ); + assertBytecodeExists( + "def x = new short[] { (short)100, (short)200 }; short s = 0; for (short l : x) s += l; return s", + "INVOKEINTERFACE org/elasticsearch/painless/api/ValueIterator.nextShort ()S" + ); + assertBytecodeExists( + "def x = new char[] { (char)'a', (char)'b' }; char s = 0; for (char l : x) s = l; return s", + "INVOKEINTERFACE org/elasticsearch/painless/api/ValueIterator.nextChar ()C" + ); + assertBytecodeExists( + "def x = new int[] { 100, 200 }; int s = 0; for (int l : x) s += l; return s", + "INVOKEINTERFACE org/elasticsearch/painless/api/ValueIterator.nextInt ()I" + ); + assertBytecodeExists( + "def x = new long[] { 100, 200 }; long s = 0; for (long l : x) s += l; return s", + "INVOKEINTERFACE org/elasticsearch/painless/api/ValueIterator.nextLong ()J" + ); + assertBytecodeExists( + "def x = new float[] { 100, 200 }; float s = 0; for (float l : x) s += l; return s", + "INVOKEINTERFACE org/elasticsearch/painless/api/ValueIterator.nextFloat ()F" + ); + assertBytecodeExists( + "def x = new double[] { 100, 200 }; double s = 0; for (double l : x) s += l; return s", + "INVOKEINTERFACE org/elasticsearch/painless/api/ValueIterator.nextDouble ()D" + ); + } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/GetByPathAugmentationTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/GetByPathAugmentationTests.java index 8e942f500c68..3022eb0fccc8 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/GetByPathAugmentationTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/GetByPathAugmentationTests.java @@ -8,10 +8,11 @@ package org.elasticsearch.painless; +import org.elasticsearch.core.Strings; + import java.util.ArrayList; import java.util.HashMap; import java.util.List; -import java.util.Locale; import java.util.Map; public class GetByPathAugmentationTests extends ScriptTestCase { @@ -38,20 +39,20 @@ public GetByPathAugmentationTests() { } private String toScript(String collection, String key) { - return String.format(Locale.ROOT, "return %s.getByPath('%s')", collection, key); + return Strings.format("return %s.getByPath('%s')", collection, key); } private String toScript(String collection, String key, String defaultValue) { - return String.format(Locale.ROOT, "return %s.getByPath('%s', %s)", collection, key, defaultValue); + return Strings.format("return %s.getByPath('%s', %s)", collection, key, defaultValue); } private String numberFormat(String unparsable, String path, int i) { String 
format = "Could not parse [%s] as a int index into list at path [%s] and index [%d]"; - return String.format(Locale.ROOT, format, unparsable, path, i); + return Strings.format(format, unparsable, path, i); } private String missingValue(String path) { - return formatted("Could not find value at path [%s]", path); + return Strings.format("Could not find value at path [%s]", path); } private void assertPathValue(String collection, String key, Object value) { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java index 5177da3f6dd0..3aee371bec45 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java @@ -16,7 +16,8 @@ import org.elasticsearch.script.ScriptedMetricAggContexts; import org.elasticsearch.search.lookup.LeafSearchLookup; import org.elasticsearch.search.lookup.SearchLookup; -import org.elasticsearch.search.lookup.SourceLookup; +import org.elasticsearch.search.lookup.Source; +import org.elasticsearch.xcontent.XContentType; import java.io.IOException; import java.util.ArrayList; @@ -24,6 +25,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.Supplier; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -110,9 +112,8 @@ public void testReturnSource() throws IOException { SearchLookup lookup = mock(SearchLookup.class); LeafSearchLookup leafLookup = mock(LeafSearchLookup.class); when(lookup.getLeafSearchLookup(leafReaderContext)).thenReturn(leafLookup); - SourceLookup sourceLookup = mock(SourceLookup.class); - when(leafLookup.asMap()).thenReturn(Collections.singletonMap("_source", sourceLookup)); - when(sourceLookup.source()).thenReturn(Collections.singletonMap("test", 1)); + Supplier source = () -> Source.fromMap(Map.of("test", 1), XContentType.JSON); + when(leafLookup.asMap()).thenReturn(Collections.singletonMap("_source", source)); ScriptedMetricAggContexts.MapScript.LeafFactory leafFactory = factory.newFactory(params, state, lookup); ScriptedMetricAggContexts.MapScript script = leafFactory.newInstance(leafReaderContext); @@ -141,9 +142,8 @@ public void testMapSourceAccess() throws IOException { SearchLookup lookup = mock(SearchLookup.class); LeafSearchLookup leafLookup = mock(LeafSearchLookup.class); when(lookup.getLeafSearchLookup(leafReaderContext)).thenReturn(leafLookup); - SourceLookup sourceLookup = mock(SourceLookup.class); - when(leafLookup.asMap()).thenReturn(Collections.singletonMap("_source", sourceLookup)); - when(sourceLookup.source()).thenReturn(Collections.singletonMap("three", 3)); + Supplier source = () -> Source.fromMap(Map.of("three", 3), XContentType.JSON); + when(leafLookup.asMap()).thenReturn(Collections.singletonMap("_source", source)); ScriptedMetricAggContexts.MapScript.LeafFactory leafFactory = factory.newFactory(params, state, lookup); ScriptedMetricAggContexts.MapScript script = leafFactory.newInstance(leafReaderContext); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/StringTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/StringTests.java index 93ec854eb7e2..402947de3753 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/StringTests.java +++ 
b/modules/lang-painless/src/test/java/org/elasticsearch/painless/StringTests.java @@ -8,13 +8,13 @@ package org.elasticsearch.painless; -import org.apache.lucene.util.Constants; +import org.elasticsearch.core.Strings; import java.util.HashMap; import java.util.Map; import static java.util.Collections.singletonMap; -import static org.elasticsearch.painless.WriterConstants.MAX_INDY_STRING_CONCAT_ARGS; +import static org.elasticsearch.painless.WriterConstants.MAX_STRING_CONCAT_ARGS; public class StringTests extends ScriptTestCase { @@ -65,7 +65,7 @@ public void testAppendMultiple() { } public void testAppendMany() { - for (int i = MAX_INDY_STRING_CONCAT_ARGS - 5; i < MAX_INDY_STRING_CONCAT_ARGS + 5; i++) { + for (int i = MAX_STRING_CONCAT_ARGS - 5; i < MAX_STRING_CONCAT_ARGS + 5; i++) { doTestAppendMany(i); } } @@ -74,14 +74,14 @@ private void doTestAppendMany(int count) { StringBuilder script = new StringBuilder("String s = \"cat\"; return s"); StringBuilder result = new StringBuilder("cat"); for (int i = 1; i < count; i++) { - final String s = formatted("%03d", i); + final String s = Strings.format("%03d", i); script.append(" + '").append(s).append("'.toString()"); result.append(s); } final String s = script.toString(); assertTrue( "every string part should be separately pushed to stack.", - Debugger.toString(s).contains(formatted("LDC \"%03d\"", count / 2)) + Debugger.toString(s).contains(Strings.format("LDC \"%03d\"", count / 2)) ); assertEquals(result.toString(), exec(s)); } @@ -234,18 +234,14 @@ public void testBase64Augmentations() { assertEquals(rando, exec("params.rando.encodeBase64().decodeBase64()", singletonMap("rando", rando), true)); } - public void testJava9ConstantStringConcatBytecode() { - assumeTrue("Needs Java 9 to test indified String concat", Constants.JRE_IS_MINIMUM_JAVA9); - assertNotNull(WriterConstants.INDY_STRING_CONCAT_BOOTSTRAP_HANDLE); + public void testConstantStringConcatBytecode() { assertBytecodeExists( "String s = \"cat\"; return s + true + 'abc' + null;", "INVOKEDYNAMIC concat(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;" ); } - public void testJava9StringConcatBytecode() { - assumeTrue("Needs Java 9 to test indified String concat", Constants.JRE_IS_MINIMUM_JAVA9); - assertNotNull(WriterConstants.INDY_STRING_CONCAT_BOOTSTRAP_HANDLE); + public void testStringConcatBytecode() { assertBytecodeExists( "String s = \"cat\"; boolean t = true; Object u = null; return s + t + 'abc' + u;", "INVOKEDYNAMIC concat(Ljava/lang/String;ZLjava/lang/String;Ljava/lang/Object;)Ljava/lang/String;" @@ -260,10 +256,4 @@ public void testNullStringConcat() { assertEquals("" + 2 + null, exec("2 + '' + null")); assertEquals("" + null + 2, exec("null + '' + 2")); } - - public void testJava9NullStringConcatBytecode() { - assumeTrue("Needs Java 9 to test indified String concat", Constants.JRE_IS_MINIMUM_JAVA9); - assertNotNull(WriterConstants.INDY_STRING_CONCAT_BOOTSTRAP_HANDLE); - assertEquals("" + null + null, exec("'' + null + null")); - } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java index 77c2bb630c3d..7050bccffa06 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java @@ -840,4 +840,12 @@ public void testInstanceMethodNotFound() { iae = 
expectScriptThrows(IllegalArgumentException.class, () -> exec("doesNotExist(1, 'string', false)"));
         assertEquals(iae.getMessage(), "Unknown call [doesNotExist] with [3] arguments.");
     }
+
+    public void testArrayToArrayException() {
+        IllegalArgumentException iae = expectScriptThrows(
+            IllegalArgumentException.class,
+            () -> exec("return new String[] {'a'}.noMethod()")
+        );
+        assertTrue(iae.getMessage().contains("member method") && iae.getMessage().contains("not found"));
+    }
 }
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/ContextInfoTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/ContextInfoTests.java
index babf2914343b..b7aa1c117233 100644
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/ContextInfoTests.java
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/ContextInfoTests.java
@@ -172,6 +172,11 @@ protected PainlessContextInfo createTestInstance() {
         return new PainlessContextInfo(randomAlphaOfLength(20), classes, importedMethods, classBindings, instanceBindings);
     }
 
+    @Override
+    protected PainlessContextInfo mutateInstance(PainlessContextInfo instance) {
+        return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
+    }
+
     @Override
     protected Writeable.Reader<PainlessContextInfo> instanceReader() {
         return PainlessContextInfo::new;
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteRequestTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteRequestTests.java
index 7d957cca4d0b..7505f75293a3 100644
--- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteRequestTests.java
+++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteRequestTests.java
@@ -75,6 +75,11 @@ protected PainlessExecuteAction.Request createTestInstance() {
         return new PainlessExecuteAction.Request(script, context != null ?
context.name : null, contextSetup); } + @Override + protected PainlessExecuteAction.Request mutateInstance(PainlessExecuteAction.Request instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } + @Override protected Writeable.Reader instanceReader() { return PainlessExecuteAction.Request::new; diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteResponseTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteResponseTests.java index 6cb9f5a74ae0..20db938bdfa7 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteResponseTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/action/PainlessExecuteResponseTests.java @@ -31,6 +31,11 @@ protected PainlessExecuteAction.Response createTestInstance() { return new PainlessExecuteAction.Response(result); } + @Override + protected PainlessExecuteAction.Response mutateInstance(PainlessExecuteAction.Response instance) { + return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 + } + @Override protected PainlessExecuteAction.Response doParseInstance(XContentParser parser) throws IOException { parser.nextToken(); // START-OBJECT diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/130_metric_agg.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/130_metric_agg.yml index 1c934b842171..161e4cee74b3 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/130_metric_agg.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/130_metric_agg.yml @@ -9,6 +9,8 @@ setup: properties: double: type: double + keyword: + type: keyword - do: cluster.health: @@ -20,6 +22,7 @@ setup: id: "1" body: double: 1.0 + keyword: "first" - do: index: @@ -27,6 +30,7 @@ setup: id: "2" body: double: 1.0 + keyword: "second" - do: index: @@ -34,6 +38,7 @@ setup: id: "3" body: double: 2.0 + keyword: "third" - do: indices.refresh: {} @@ -61,3 +66,49 @@ setup: - match: { hits.total: 3 } - match: { aggregations.total.value: 4.0 } +--- +"Scripted Metric Agg Non String Map Key": + + - do: + search: + rest_total_hits_as_int: true + body: { + "size": 0, + "aggs": { + "total": { + "scripted_metric": { + "init_script": "state.transactions = [:]", + "map_script": "state.transactions[doc['double'].value] = doc['double'].value", + "combine_script": "return state.transactions", + "reduce_script": "double sum = 0; for (transactions in states) { for (entry in transactions.entrySet()) { sum += entry.getKey() - entry.getValue() } } return sum" + } + } + } + } + + - match: { hits.total: 3 } + - match: { aggregations.total.value: 0.0 } + +--- +"Scripted Metric Agg String Map Key": + + - do: + search: + rest_total_hits_as_int: true + body: { + "size": 0, + "aggs": { + "total": { + "scripted_metric": { + "init_script": "state.transactions = [:]", + "map_script": "state.transactions[doc['keyword'].value] = doc['double'].value", + "combine_script": "return state.transactions", + "reduce_script": "double sum = 0; for (transactions in states) { for (entry in transactions.entrySet()) { sum += entry.getValue() } } return sum" + } + } + } + } + + - match: { hits.total: 3 } + - match: { aggregations.total.value: 4.0 } + diff --git a/modules/mapper-extras/build.gradle b/modules/mapper-extras/build.gradle index a9db8325e1db..62e7744bfa7f 100644 --- 
a/modules/mapper-extras/build.gradle +++ b/modules/mapper-extras/build.gradle @@ -8,8 +8,8 @@ import org.elasticsearch.gradle.internal.info.BuildParams -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { diff --git a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/RankFeaturesMapperIntegrationIT.java b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/RankFeaturesMapperIntegrationIT.java new file mode 100644 index 000000000000..badc2dd568f5 --- /dev/null +++ b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/RankFeaturesMapperIntegrationIT.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.test.ESIntegTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Map; + +import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; + +public class RankFeaturesMapperIntegrationIT extends ESIntegTestCase { + + private static final String LOWER_RANKED_FEATURE = "ten"; + private static final String HIGHER_RANKED_FEATURE = "twenty"; + private static final String INDEX_NAME = "rank_feature_test"; + private static final String FIELD_NAME = "all_rank_features"; + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(MapperExtrasPlugin.class); + } + + public void testRankFeaturesTermQuery() throws IOException { + init(); + SearchResponse response = client().prepareSearch(INDEX_NAME) + .setQuery(QueryBuilders.termQuery(FIELD_NAME, HIGHER_RANKED_FEATURE)) + .get(); + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + for (SearchHit hit : response.getHits().getHits()) { + assertThat(hit.getScore(), equalTo(20f)); + } + + response = client().prepareSearch(INDEX_NAME) + .setQuery(QueryBuilders.termQuery(FIELD_NAME, HIGHER_RANKED_FEATURE).boost(100f)) + .get(); + assertThat(response.getHits().getTotalHits().value, equalTo(2L)); + for (SearchHit hit : response.getHits().getHits()) { + assertThat(hit.getScore(), equalTo(2000f)); + } + + response = client().prepareSearch(INDEX_NAME) + .setQuery( + QueryBuilders.boolQuery() + .should(QueryBuilders.termQuery(FIELD_NAME, HIGHER_RANKED_FEATURE)) + .should(QueryBuilders.termQuery(FIELD_NAME, LOWER_RANKED_FEATURE).boost(3f)) + .minimumShouldMatch(1) + ) + .get(); + assertThat(response.getHits().getTotalHits().value, equalTo(3L)); + for (SearchHit hit : response.getHits().getHits()) { + if (hit.getId().equals("all")) { + 
assertThat(hit.getScore(), equalTo(50f)); + } + if (hit.getId().equals("lower")) { + assertThat(hit.getScore(), equalTo(30f)); + } + if (hit.getId().equals("higher")) { + assertThat(hit.getScore(), equalTo(20f)); + } + } + + response = client().prepareSearch(INDEX_NAME).setQuery(QueryBuilders.termQuery(FIELD_NAME, "missing_feature")).get(); + assertThat(response.getHits().getTotalHits().value, equalTo(0L)); + } + + private void init() throws IOException { + Settings.Builder settings = Settings.builder(); + settings.put(indexSettings()); + prepareCreate(INDEX_NAME).setSettings(settings) + .setMapping( + jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject("all_rank_features") + .field("type", "rank_features") + .endObject() + .endObject() + .endObject() + .endObject() + ) + .get(); + ensureGreen(); + + BulkResponse bulk = client().prepareBulk() + .add( + client().prepareIndex(INDEX_NAME) + .setId("all") + .setSource(Map.of("all_rank_features", Map.of(LOWER_RANKED_FEATURE, 10, HIGHER_RANKED_FEATURE, 20))) + ) + .add(client().prepareIndex(INDEX_NAME).setId("lower").setSource(Map.of("all_rank_features", Map.of(LOWER_RANKED_FEATURE, 10)))) + .add( + client().prepareIndex(INDEX_NAME).setId("higher").setSource(Map.of("all_rank_features", Map.of(HIGHER_RANKED_FEATURE, 20))) + ) + .get(); + assertFalse(bulk.buildFailureMessage(), bulk.hasFailures()); + assertThat(refresh().getFailedShards(), equalTo(0)); + } + +} diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java index 08f39c583e2f..c6298f1eaf81 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java @@ -53,7 +53,7 @@ import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.script.field.TextDocValuesField; import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; -import org.elasticsearch.search.lookup.SourceLookup; +import org.elasticsearch.search.lookup.SourceProvider; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -211,14 +211,13 @@ private Function, IOException }; }; } - SourceLookup sourceLookup = searchExecutionContext.lookup().source(); ValueFetcher valueFetcher = valueFetcher(searchExecutionContext, null); + SourceProvider sourceProvider = searchExecutionContext.lookup(); return context -> { valueFetcher.setNextReader(context); return docID -> { try { - sourceLookup.setSegmentAndDocument(context, docID); - return valueFetcher.fetchValues(sourceLookup, docID, new ArrayList<>()); + return valueFetcher.fetchValues(sourceProvider.getSource(context, docID), docID, new ArrayList<>()); } catch (IOException e) { throw new UncheckedIOException(e); } @@ -338,7 +337,7 @@ protected BytesRef storedToBytesRef(Object stored) { name(), CoreValuesSourceType.KEYWORD, SourceValueFetcher.toString(fieldDataContext.sourcePathsLookup().apply(name())), - fieldDataContext.lookupSupplier().get().source(), + fieldDataContext.lookupSupplier().get(), TextDocValuesField::new ); } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureQueryBuilder.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureQueryBuilder.java index 
25f078653411..28424a9e5289 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureQueryBuilder.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureQueryBuilder.java @@ -11,7 +11,7 @@ import org.apache.lucene.document.FeatureField; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.mapper.MappedFieldType; @@ -413,7 +413,7 @@ protected int doHashCode() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_EMPTY; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.ZERO; } } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java index aea9fe9b2493..4187963e061c 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeaturesFieldMapper.java @@ -10,6 +10,7 @@ import org.apache.lucene.document.FeatureField; import org.apache.lucene.search.Query; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.fielddata.FieldDataContext; @@ -27,6 +28,8 @@ import java.io.IOException; import java.util.Map; +import static org.elasticsearch.index.query.AbstractQueryBuilder.DEFAULT_BOOST; + /** * A {@link FieldMapper} that exposes Lucene's {@link FeatureField} as a sparse * vector of features. 
@@ -77,7 +80,7 @@ public static final class RankFeaturesFieldType extends MappedFieldType {
         private final boolean positiveScoreImpact;
 
         public RankFeaturesFieldType(String name, Map<String, Object> meta, boolean positiveScoreImpact) {
-            super(name, false, false, false, TextSearchInfo.NONE, meta);
+            super(name, true, false, false, TextSearchInfo.SIMPLE_MATCH_ONLY, meta);
             this.positiveScoreImpact = positiveScoreImpact;
         }
 
@@ -86,10 +89,6 @@ public String typeName() {
             return CONTENT_TYPE;
         }
 
-        public boolean positiveScoreImpact() {
-            return positiveScoreImpact;
-        }
-
         @Override
         public Query existsQuery(SearchExecutionContext context) {
             throw new IllegalArgumentException("[rank_features] fields do not support [exists] queries");
@@ -107,7 +106,14 @@ public ValueFetcher valueFetcher(SearchExecutionContext context, String format)
 
         @Override
         public Query termQuery(Object value, SearchExecutionContext context) {
-            throw new IllegalArgumentException("Queries on [rank_features] fields are not supported");
+            return FeatureField.newLinearQuery(name(), indexedValueForSearch(value), DEFAULT_BOOST);
+        }
+
+        private static String indexedValueForSearch(Object value) {
+            if (value instanceof BytesRef) {
+                return ((BytesRef) value).utf8ToString();
+            }
+            return value.toString();
+        }
     }
diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java
index ae088d5fed02..a88b4592448b 100644
--- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java
+++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java
@@ -18,6 +18,7 @@
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.index.IndexMode;
 import org.elasticsearch.index.fielddata.FieldData;
 import org.elasticsearch.index.fielddata.FieldDataContext;
 import org.elasticsearch.index.fielddata.FormattedDocValues;
@@ -78,7 +79,7 @@ private static ScaledFloatFieldMapper toType(FieldMapper in) {
 
     public static class Builder extends FieldMapper.Builder {
 
-        private final Parameter<Boolean> indexed = Parameter.indexParam(m -> toType(m).indexed, true);
+        private final Parameter<Boolean> indexed;
 
         private final Parameter<Boolean> hasDocValues = Parameter.docValuesParam(m -> toType(m).hasDocValues, true);
         private final Parameter<Boolean> stored = Parameter.storeParam(m -> toType(m).stored, false);
@@ -120,11 +121,13 @@ public static class Builder extends FieldMapper.Builder {
          */
         private final Parameter<TimeSeriesParams.MetricType> metric;
 
-        public Builder(String name, Settings settings) {
-            this(name, IGNORE_MALFORMED_SETTING.get(settings), COERCE_SETTING.get(settings));
+        private final IndexMode indexMode;
+
+        public Builder(String name, Settings settings, IndexMode indexMode) {
+            this(name, IGNORE_MALFORMED_SETTING.get(settings), COERCE_SETTING.get(settings), indexMode);
         }
 
-        public Builder(String name, boolean ignoreMalformedByDefault, boolean coerceByDefault) {
+        public Builder(String name, boolean ignoreMalformedByDefault, boolean coerceByDefault, IndexMode indexMode) {
             super(name);
             this.ignoreMalformed = Parameter.explicitBoolParam(
                 "ignore_malformed",
@@ -133,11 +136,19 @@ public Builder(String name, boolean ignoreMalformedByDefault, boolean coerceByDe
                 ignoreMalformedByDefault
             );
             this.coerce = Parameter.explicitBoolParam("coerce", true, m -> toType(m).coerce, coerceByDefault);
-
+            this.indexMode = indexMode;
+            this.indexed = Parameter.indexParam(m -> toType(m).indexed, () -> {
+                if (indexMode == IndexMode.TIME_SERIES) {
+                    var metricType = getMetric().getValue();
+                    return metricType != TimeSeriesParams.MetricType.COUNTER && metricType != TimeSeriesParams.MetricType.GAUGE;
+                } else {
+                    return true;
+                }
+            });
             this.metric = TimeSeriesParams.metricParam(
                 m -> toType(m).metricType,
-                TimeSeriesParams.MetricType.gauge,
-                TimeSeriesParams.MetricType.counter
+                TimeSeriesParams.MetricType.GAUGE,
+                TimeSeriesParams.MetricType.COUNTER
             ).addValidator(v -> {
                 if (v != null && hasDocValues.getValue() == false) {
                     throw new IllegalArgumentException(
@@ -162,6 +173,10 @@ public Builder metric(TimeSeriesParams.MetricType metric) {
             return this;
         }
 
+        private Parameter<TimeSeriesParams.MetricType> getMetric() {
+            return metric;
+        }
+
         @Override
         protected Parameter<?>[] getParameters() {
             return new Parameter<?>[] { indexed, hasDocValues, stored, ignoreMalformed, meta, scalingFactor, coerce, nullValue, metric };
@@ -183,7 +198,7 @@ public ScaledFloatFieldMapper build(MapperBuilderContext context) {
         }
     }
 
-    public static final TypeParser PARSER = new TypeParser((n, c) -> new Builder(n, c.getSettings()));
+    public static final TypeParser PARSER = new TypeParser((n, c) -> new Builder(n, c.getSettings(), c.getIndexSettings().getMode()));
 
     public static final class ScaledFloatFieldType extends SimpleMappedFieldType {
 
@@ -287,7 +302,7 @@ public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext
                 failIfNoDocValues();
             }
 
-            ValuesSourceType valuesSourceType = metricType == TimeSeriesParams.MetricType.counter
+            ValuesSourceType valuesSourceType = metricType == TimeSeriesParams.MetricType.COUNTER
                 ? TimeSeriesValuesSourceType.COUNTER
                 : IndexNumericFieldData.NumericType.LONG.getValuesSourceType();
             if ((operation == FielddataOperation.SEARCH || operation == FielddataOperation.SCRIPT) && hasDocValues()) {
@@ -310,7 +325,7 @@ public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext
                 name(),
                 valuesSourceType,
                 sourceValueFetcher(sourcePaths),
-                searchLookup.source(),
+                searchLookup,
                 ScaledFloatDocValuesField::new
             );
         }
@@ -410,6 +425,7 @@ public String toString() {
     private final boolean ignoreMalformedByDefault;
     private final boolean coerceByDefault;
     private final TimeSeriesParams.MetricType metricType;
+    private final IndexMode indexMode;
 
     private ScaledFloatFieldMapper(
         String simpleName,
@@ -429,6 +445,7 @@ private ScaledFloatFieldMapper(
         this.ignoreMalformedByDefault = builder.ignoreMalformed.getDefaultValue().value();
         this.coerceByDefault = builder.coerce.getDefaultValue().value();
         this.metricType = builder.metric.getValue();
+        this.indexMode = builder.indexMode;
     }
 
     boolean coerce() {
@@ -452,7 +469,7 @@ protected String contentType() {
 
     @Override
     public FieldMapper.Builder getMergeBuilder() {
-        return new Builder(simpleName(), ignoreMalformedByDefault, coerceByDefault).metric(metricType).init(this);
+        return new Builder(simpleName(), ignoreMalformedByDefault, coerceByDefault, indexMode).metric(metricType).init(this);
     }
 
     @Override
diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/RankFeatureQueryBuilderTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/RankFeatureQueryBuilderTests.java
index 9a03ecb7ec41..32446c3c2739 100644
--- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/RankFeatureQueryBuilderTests.java
+++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/RankFeatureQueryBuilderTests.java
@@ -111,7 +111,7 @@ public void testDefaultScoreFunction() throws IOException {
     }
 
     public void testIllegalField() {
-        String query = formatted("""
+        String query = Strings.format("""
             {
                 "rank_feature" : {
                     "field": "%s"
diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java
index c30540d3106d..4e8e321a72af 100644
--- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java
+++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java
@@ -10,9 +10,12 @@
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.core.Tuple;
+import org.elasticsearch.index.IndexMode;
+import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MapperParsingException;
@@ -20,6 +23,7 @@
 import org.elasticsearch.index.mapper.MapperTestCase;
 import org.elasticsearch.index.mapper.ParsedDocument;
 import org.elasticsearch.index.mapper.SourceToParse;
+import org.elasticsearch.index.mapper.TimeSeriesParams;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.xcontent.XContentBuilder;
@@ -34,6 +38,7 @@
 import static java.util.Collections.singletonList;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
 
 public class ScaledFloatFieldMapperTests extends MapperTestCase {
 
@@ -89,15 +94,8 @@ public void testDefaults() throws Exception {
         ParsedDocument doc = mapper.parse(source(b -> b.field("field", 123)));
 
         IndexableField[] fields = doc.rootDoc().getFields("field");
-        assertEquals(2, fields.length);
-        IndexableField pointField = fields[0];
-        assertEquals(1, pointField.fieldType().pointDimensionCount());
-        assertFalse(pointField.fieldType().stored());
-        assertEquals(1230, pointField.numericValue().longValue());
-        IndexableField dvField = fields[1];
-        assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType());
-        assertEquals(1230, dvField.numericValue().longValue());
-        assertFalse(dvField.fieldType().stored());
+        assertEquals(1, fields.length);
+        assertEquals("LongField <field:1230>", fields[0].toString());
     }
 
     public void testMissingScalingFactor() {
@@ -170,13 +168,9 @@ public void testStore() throws Exception {
         );
 
         IndexableField[] fields = doc.rootDoc().getFields("field");
-        assertEquals(3, fields.length);
-        IndexableField pointField = fields[0];
-        assertEquals(1, pointField.fieldType().pointDimensionCount());
-        assertEquals(1230, pointField.numericValue().doubleValue(), 0d);
-        IndexableField dvField = fields[1];
-        assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType());
-        IndexableField storedField = fields[2];
+        assertEquals(2, fields.length);
+        assertEquals("LongField <field:1230>", fields[0].toString());
+        IndexableField storedField = fields[1];
         assertTrue(storedField.fieldType().stored());
         assertEquals(1230, storedField.numericValue().longValue());
     }
@@ -191,12 +185,8 @@ public void testCoerce() throws Exception {
             )
         );
         IndexableField[] fields = doc.rootDoc().getFields("field");
-        assertEquals(2, fields.length);
-        IndexableField pointField = fields[0];
-        assertEquals(1, pointField.fieldType().pointDimensionCount());
-        assertEquals(1230, pointField.numericValue().longValue());
-        IndexableField dvField = fields[1];
-        assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType());
+        assertEquals(1, fields.length);
+        assertEquals("LongField <field:1230>", fields[0].toString());
 
         DocumentMapper mapper2 = createDocumentMapper(
             fieldMapping(b -> b.field("type", "scaled_float").field("scaling_factor", 10.0).field("coerce", false))
@@ -249,14 +239,8 @@ public void testNullValue() throws IOException {
             )
         );
         IndexableField[] fields = doc.rootDoc().getFields("field");
-        assertEquals(2, fields.length);
-        IndexableField pointField = fields[0];
-        assertEquals(1, pointField.fieldType().pointDimensionCount());
-        assertFalse(pointField.fieldType().stored());
-        assertEquals(25, pointField.numericValue().longValue());
-        IndexableField dvField = fields[1];
-        assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType());
-        assertFalse(dvField.fieldType().stored());
+        assertEquals(1, fields.length);
+        assertEquals("LongField <field:25>", fields[0].toString());
     }
 
     /**
@@ -314,6 +298,19 @@ public void testMetricAndDocvalues() {
         assertThat(e.getCause().getMessage(), containsString("Field [time_series_metric] requires that [doc_values] is true"));
     }
 
+    public void testTimeSeriesIndexDefault() throws Exception {
+        var randomMetricType = randomFrom(TimeSeriesParams.MetricType.values());
+        var indexSettings = getIndexSettingsBuilder().put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES.getName())
+            .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "dimension_field");
+        var mapperService = createMapperService(indexSettings.build(), fieldMapping(b -> {
+            minimalMapping(b);
+            b.field("time_series_metric", randomMetricType.toString());
+        }));
+        var ft = (ScaledFloatFieldMapper.ScaledFloatFieldType) mapperService.fieldType("field");
+        assertThat(ft.getMetricType(), equalTo(randomMetricType));
+        assertThat(ft.isIndexed(), is(false));
+    }
+
     @Override
     protected void randomFetchTestFieldConfig(XContentBuilder b) throws IOException {
         // Large floats are a terrible idea but the round trip should still work no matter how badly you configure the field
diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldTypeTests.java
index b07ff9dafbf9..1493ece2671e 100644
--- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldTypeTests.java
+++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldTypeTests.java
@@ -216,14 +216,14 @@ public void testFieldData() throws IOException {
     }
 
     public void testFetchSourceValue() throws IOException {
-        MappedFieldType mapper = new ScaledFloatFieldMapper.Builder("field", false, false).scalingFactor(100)
+        MappedFieldType mapper = new ScaledFloatFieldMapper.Builder("field", false, false, null).scalingFactor(100)
             .build(MapperBuilderContext.root(false))
             .fieldType();
         assertEquals(List.of(3.14), fetchSourceValue(mapper, 3.1415926));
         assertEquals(List.of(3.14), fetchSourceValue(mapper, "3.1415"));
         assertEquals(List.of(), fetchSourceValue(mapper, ""));
 
-        MappedFieldType
nullValueMapper = new ScaledFloatFieldMapper.Builder("field", false, false).scalingFactor(100) + MappedFieldType nullValueMapper = new ScaledFloatFieldMapper.Builder("field", false, false, null).scalingFactor(100) .nullValue(2.71) .build(MapperBuilderContext.root(false)) .fieldType(); diff --git a/modules/parent-join/build.gradle b/modules/parent-join/build.gradle index 66079cfd444c..903192e6ce25 100644 --- a/modules/parent-join/build.gradle +++ b/modules/parent-join/build.gradle @@ -5,8 +5,8 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { diff --git a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java index 3e7cc6dead06..2821c38a6b24 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java +++ b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ChildQuerySearchIT.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.lucene.search.function.CombineFunction; import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.IdsQueryBuilder; @@ -843,11 +844,11 @@ public void testSimpleQueryRewrite() throws Exception { // index simple data int childId = 0; for (int i = 0; i < 10; i++) { - String parentId = formatted("p%03d", i); + String parentId = Strings.format("p%03d", i); createIndexRequest("test", "parent", parentId, null, "p_field", parentId).get(); int j = childId; for (; j < childId + 50; j++) { - String childUid = formatted("c%03d", j); + String childUid = Strings.format("c%03d", j); createIndexRequest("test", "child", childUid, parentId, "c_field", childUid).get(); } childId = j; diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java index 1594e4e817b9..f191dfcda3bd 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.join.aggregations; import org.apache.lucene.search.Query; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -180,7 +180,7 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_EMPTY; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.ZERO; } } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregatorFactory.java 
b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregatorFactory.java index 83cc38636b10..fc797be04451 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregatorFactory.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregatorFactory.java @@ -68,7 +68,7 @@ protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound c ); } WithOrdinals valuesSource = (WithOrdinals) rawValuesSource; - long maxOrd = valuesSource.globalMaxOrd(context.searcher()); + long maxOrd = valuesSource.globalMaxOrd(context.searcher().getIndexReader()); return new ParentToChildrenAggregator( name, factories, diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java index 1b6cca403aba..c131be17c5e1 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.join.aggregations; import org.apache.lucene.search.Query; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -178,7 +178,7 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_EMPTY; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.ZERO; } } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregatorFactory.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregatorFactory.java index 5ff008832622..f531ac67b98f 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregatorFactory.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregatorFactory.java @@ -68,7 +68,7 @@ protected Aggregator doCreateInternal(Aggregator children, CardinalityUpperBound ); } WithOrdinals valuesSource = (WithOrdinals) rawValuesSource; - long maxOrd = valuesSource.globalMaxOrd(context.searcher()); + long maxOrd = valuesSource.globalMaxOrd(context.searcher().getIndexReader()); return new ChildrenToParentAggregator( name, factories, diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java index a8845b2731ab..8f9b7bfd7e10 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java @@ -18,7 +18,7 @@ import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -540,7 +540,7 @@ protected void extractInnerHitBuilders(Map inner } @Override - public Version 
getMinimalSupportedVersion() { - return Version.V_EMPTY; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.ZERO; } } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java index 9c297236a8e0..f3777c526994 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java @@ -11,7 +11,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -312,7 +312,7 @@ protected void extractInnerHitBuilders(Map inner } @Override - public Version getMinimalSupportedVersion() { - return Version.V_EMPTY; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.ZERO; } } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentIdQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentIdQueryBuilder.java index cfd39c7ee8f8..7d15bbcedf8f 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentIdQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentIdQueryBuilder.java @@ -15,7 +15,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -192,7 +192,7 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_EMPTY; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.ZERO; } } diff --git a/modules/percolator/build.gradle b/modules/percolator/build.gradle index 785edc32c143..a871056539d3 100644 --- a/modules/percolator/build.gradle +++ b/modules/percolator/build.gradle @@ -5,8 +5,8 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index de319f67b780..f2d06e2b72d4 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -33,6 +33,7 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.get.GetRequest; @@ -216,12 +217,12 @@ protected PercolateQueryBuilder(String field, Supplier documentS super(in); field = in.readString(); name = in.readOptionalString(); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { String documentType = in.readOptionalString(); assert documentType == null; } indexedDocumentIndex = in.readOptionalString(); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { String indexedDocumentType = in.readOptionalString(); assert indexedDocumentType == null; } @@ -258,12 +259,12 @@ protected void doWriteTo(StreamOutput out) throws IOException { } out.writeString(field); out.writeOptionalString(name); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { // In 7x, typeless percolate queries are represented by null documentType values out.writeOptionalString(null); } out.writeOptionalString(indexedDocumentIndex); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { // In 7x, typeless percolate queries are represented by null indexedDocumentType values out.writeOptionalString(null); } @@ -660,7 +661,7 @@ public > IFD getForField( } @Override - public Version getMinimalSupportedVersion() { - return Version.V_EMPTY; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.ZERO; } } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java index 722b47563559..f7c01bec3b2c 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java @@ -252,7 +252,7 @@ public void testRequiredParameters() { public void testFromJsonNoDocumentType() throws IOException { SearchExecutionContext searchExecutionContext = createSearchExecutionContext(); - QueryBuilder queryBuilder = parseQuery(formatted(""" + QueryBuilder queryBuilder = parseQuery(Strings.format(""" {"percolate" : { "document": {}, "field":"%s"}} """, queryField)); queryBuilder.toQuery(searchExecutionContext); @@ -265,14 +265,14 @@ public void testFromJsonNoType() throws IOException { documentSource = Collections.singletonList(randomSource(new 
HashSet<>())); SearchExecutionContext searchExecutionContext = createSearchExecutionContext(); - QueryBuilder queryBuilder = parseQuery(formatted(""" + QueryBuilder queryBuilder = parseQuery(Strings.format(""" {"percolate" : { "index": "%s", "id": "%s", "field":"%s"}} """, indexedDocumentIndex, indexedDocumentId, queryField)); rewriteAndFetch(queryBuilder, searchExecutionContext).toQuery(searchExecutionContext); } public void testBothDocumentAndDocumentsSpecified() { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parseQuery(formatted(""" + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parseQuery(Strings.format(""" {"percolate" : { "document": {}, "documents": [{}, {}], "field":"%s"}} """, queryField))); assertThat(e.getMessage(), containsString("The following fields are not allowed together: [document, documents]")); @@ -382,7 +382,7 @@ public void testDisallowExpensiveQueries() { public void testFromJsonWithDocumentType() throws IOException { SearchExecutionContext searchExecutionContext = createSearchExecutionContext(); - String queryAsString = formatted(""" + String queryAsString = Strings.format(""" {"percolate" : { "document": {}, "document_type":"%s", "field":"%s"}} """, docType, queryField); XContentParser parser = createParserWithCompatibilityFor(JsonXContent.jsonXContent, queryAsString, RestApiVersion.V_7); @@ -398,7 +398,7 @@ public void testFromJsonWithType() throws IOException { documentSource = Collections.singletonList(randomSource(new HashSet<>())); SearchExecutionContext searchExecutionContext = createSearchExecutionContext(); - String queryAsString = formatted(""" + String queryAsString = Strings.format(""" {"percolate" : { "index": "%s", "type": "_doc", "id": "%s", "field":"%s"}} """, indexedDocumentIndex, indexedDocumentId, queryField); XContentParser parser = createParserWithCompatibilityFor(JsonXContent.jsonXContent, queryAsString, RestApiVersion.V_7); diff --git a/modules/rank-eval/build.gradle b/modules/rank-eval/build.gradle index 5ac9922a1e07..1268a40dd5bd 100644 --- a/modules/rank-eval/build.gradle +++ b/modules/rank-eval/build.gradle @@ -5,8 +5,8 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java index bea4d4222fa5..d6a9a25be51f 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.index.rankeval; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -45,7 +45,7 @@ public RankEvalRequest(RankEvalSpec rankingEvaluationSpec, String[] indices) { rankingEvaluationSpec = new RankEvalSpec(in); indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); - if (in.getVersion().onOrAfter(Version.V_7_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { searchType = SearchType.fromId(in.readByte()); } } @@ -126,7 +126,7 @@ public void writeTo(StreamOutput out) throws IOException { rankingEvaluationSpec.writeTo(out); out.writeStringArray(indices); indicesOptions.writeIndicesOptions(out); - if (out.getVersion().onOrAfter(Version.V_7_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { out.writeByte(searchType.id()); } } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java index d761b8e28aa9..4436b9f3902f 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java @@ -288,11 +288,11 @@ public void testMetricDetails() { assertEquals(expectedNdcg, detail.getNDCG(), 0.0); assertEquals(unratedDocs, detail.getUnratedDocs()); if (idcg != 0) { - assertEquals(formatted(""" + assertEquals(Strings.format(""" {"dcg":{"dcg":%s,"ideal_dcg":%s,"normalized_dcg":%s,"unrated_docs":%s}}\ """, dcg, idcg, expectedNdcg, unratedDocs), Strings.toString(detail)); } else { - assertEquals(formatted(""" + assertEquals(Strings.format(""" {"dcg":{"dcg":%s,"unrated_docs":%s}}\ """, dcg, unratedDocs), Strings.toString(detail)); } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java index b602f2b0a119..d4ec7ba9b9ef 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ToXContent; 
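The RankEvalRequest hunk above and the PercolateQueryBuilder hunk earlier in this diff follow the same serialization migration: version gates on the stream now use StreamInput#getTransportVersion() / StreamOutput#getTransportVersion() together with TransportVersion constants instead of Version. A minimal sketch of the read/write pattern, assuming a hypothetical Writeable with a single gated searchType field (the class and field names are illustrative; only the TransportVersion calls are taken from this diff):

    import org.elasticsearch.TransportVersion;
    import org.elasticsearch.action.search.SearchType;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    import java.io.IOException;

    // Sketch only: a made-up Writeable showing the TransportVersion gating used in RankEvalRequest above.
    class VersionGatedFieldExample implements Writeable {
        private SearchType searchType = SearchType.QUERY_THEN_FETCH;

        VersionGatedFieldExample(StreamInput in) throws IOException {
            if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) {
                searchType = SearchType.fromId(in.readByte()); // field only present on newer wire formats
            }
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) {
                out.writeByte(searchType.id()); // write side mirrors the read side
            }
        }
    }

The write side must mirror the read side exactly, so that a stream targeting an older transport version never carries the extra byte.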
@@ -44,7 +43,6 @@ import static java.util.Collections.singleton; import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; -import static org.elasticsearch.test.TestSearchContext.SHARD_TARGET; import static org.elasticsearch.test.XContentTestUtils.insertRandomFields; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.hamcrest.Matchers.instanceOf; @@ -54,7 +52,6 @@ public class RankEvalResponseTests extends ESTestCase { private static final Exception[] RANDOM_EXCEPTIONS = new Exception[] { new ClusterBlockException(singleton(NoMasterBlockService.NO_MASTER_BLOCK_WRITES)), new CircuitBreakingException("Data too large", 123, 456, CircuitBreaker.Durability.PERMANENT), - new SearchParseException(SHARD_TARGET, "Parse failure", new XContentLocation(12, 98)), new IllegalArgumentException("Closed resource", new RuntimeException("Resource")), new SearchPhaseExecutionException( "search", diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index 1496e32927f9..4cd12b44aaaa 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -17,9 +17,9 @@ import org.gradle.api.internal.artifacts.ArtifactAttributes apply plugin: 'elasticsearch.test-with-dependencies' apply plugin: 'elasticsearch.jdk-download' -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.internal-java-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-java-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexPlugin.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexPlugin.java index 3155f49ed376..30c416ddadfe 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexPlugin.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexPlugin.java @@ -13,7 +13,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.ClusterSettings; @@ -100,7 +100,7 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationDeciders allocationDeciders + AllocationService allocationService ) { return Collections.singletonList(new ReindexSslConfig(environment.settings(), environment, resourceWatcherService)); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFromRemoteWithAuthTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFromRemoteWithAuthTests.java index 974c4ead7d20..ed30fa5c4eeb 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFromRemoteWithAuthTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFromRemoteWithAuthTests.java @@ -21,7 +21,7 @@ import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.client.internal.Client; import 
org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -170,7 +170,7 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationDeciders allocationDeciders + AllocationService allocationService ) { testFilter.set(new ReindexFromRemoteWithAuthTests.TestFilter(threadPool)); return Collections.emptyList(); diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/remote/RemoteRequestBuildersTests.java index f06d40afeb0a..e0cfe0088d9e 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/remote/RemoteRequestBuildersTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/remote/RemoteRequestBuildersTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESTestCase; @@ -228,7 +229,7 @@ public void testInitialSearchEntity() throws IOException { searchRequest.source().fetchSource(new String[] { "in1", "in2" }, new String[] { "out" }); entity = initialSearch(searchRequest, new BytesArray(query), remoteVersion).getEntity(); assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue()); - assertEquals(XContentHelper.stripWhitespace(formatted(""" + assertEquals(XContentHelper.stripWhitespace(Strings.format(""" { "query": %s, "_source": { diff --git a/modules/repository-azure/build.gradle b/modules/repository-azure/build.gradle index 7f0b86f5db69..aa35dc85ea38 100644 --- a/modules/repository-azure/build.gradle +++ b/modules/repository-azure/build.gradle @@ -13,7 +13,7 @@ import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' apply plugin: 'elasticsearch.internal-test-artifact-base' @@ -23,19 +23,21 @@ esplugin { } versions << [ - 'azure': '12.16.0', - 'azureCommon': '12.15.1', - 'azureCore': '1.27.0', - 'azureCoreHttpNetty': '1.11.9', + 'azure': '12.20.1', + 'azureCommon': '12.19.1', + 'azureCore': '1.34.0', + 'azureCoreHttpNetty': '1.12.7', + 'azureJackson': '2.13.4', + 'azureJacksonDatabind': '2.13.4.2', 'jakartaActivation': '1.2.1', 'jakartaXMLBind': '2.3.2', 'stax2API': '4.2.1', - 'woodstox': '6.2.7', + 'woodstox': '6.4.0', - 'reactorNetty': '1.0.15', - 'reactorCore': '3.4.14', - 'reactiveStreams': '1.0.3', + 'reactorNetty': '1.0.24', + 'reactorCore': '3.4.23', + 'reactiveStreams': '1.0.4', ] dependencies { @@ -45,14 +47,14 @@ dependencies { api "com.azure:azure-core:${versions.azureCore}" // jackson - api "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" - api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + api "com.fasterxml.jackson.core:jackson-core:${versions.azureJackson}" + api "com.fasterxml.jackson.core:jackson-databind:${versions.azureJacksonDatabind}" + api "com.fasterxml.jackson.core:jackson-annotations:${versions.azureJackson}" // jackson xml - api "com.fasterxml.jackson.dataformat:jackson-dataformat-xml:${versions.jackson}" - api "com.fasterxml.jackson.datatype:jackson-datatype-jsr310:${versions.jackson}" - api "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.jackson}" + api "com.fasterxml.jackson.dataformat:jackson-dataformat-xml:${versions.azureJackson}" + api "com.fasterxml.jackson.datatype:jackson-datatype-jsr310:${versions.azureJackson}" + api "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.azureJackson}" api "jakarta.activation:jakarta.activation-api:${versions.jakartaActivation}" // The SDK uses javax.xml bindings api "jakarta.xml.bind:jakarta.xml.bind-api:${versions.jakartaXMLBind}" @@ -117,7 +119,8 @@ tasks.named("thirdPartyAudit").configure { 'com.aayushatharva.brotli4j.Brotli4jLoader', 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Status', 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Wrapper', - 'com.aayushatharva.brotli4j.encoder.Encoders', + 'com.aayushatharva.brotli4j.encoder.BrotliEncoderChannel', + 'com.aayushatharva.brotli4j.encoder.Encoder', 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', @@ -368,10 +371,9 @@ testClusters.matching { it.name == "yamlRestTest" }.configureEach { } if (useFixture) { setting 'azure.client.integration_test.endpoint_suffix', azureAddress - String firstPartOfSeed = BuildParams.testSeed.tokenize(':').get(0) def ignoreTestSeed = providers.systemProperty('ignore.tests.seed').isPresent() ? 
PropertyNormalization.IGNORE_VALUE : PropertyNormalization.DEFAULT - setting 'thread_pool.repository_azure.max', (Math.abs(Long.parseUnsignedLong(firstPartOfSeed, 16) % 10) + 1).toString(), ignoreTestSeed + setting 'thread_pool.repository_azure.max', (Math.abs(BuildParams.random.nextLong() % 10) + 1).toString(), ignoreTestSeed } } diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java index 169164cdce2b..48dce1a86b75 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java @@ -13,7 +13,7 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Setting; @@ -97,7 +97,7 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationDeciders allocationDeciders + AllocationService allocationService ) { AzureClientProvider azureClientProvider = AzureClientProvider.create(threadPool, settings); azureStoreService.set(createAzureStorageService(settings, azureClientProvider)); diff --git a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java index f05fe4331b07..4c9a3b1e6991 100644 --- a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java +++ b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java @@ -187,7 +187,10 @@ public void testWriteBlobWithRetries() throws Exception { if (randomBoolean()) { if (randomBoolean()) { - Streams.readFully(exchange.getRequestBody(), new byte[randomIntBetween(1, Math.max(1, bytes.length - 1))]); + org.elasticsearch.core.Streams.readFully( + exchange.getRequestBody(), + new byte[randomIntBetween(1, Math.max(1, bytes.length - 1))] + ); } else { Streams.readFully(exchange.getRequestBody()); AzureHttpHandler.sendError(exchange, randomFrom(RestStatus.INTERNAL_SERVER_ERROR, RestStatus.SERVICE_UNAVAILABLE)); diff --git a/modules/repository-gcs/build.gradle b/modules/repository-gcs/build.gradle index dce2513fb3b8..4f878eb9d79d 100644 --- a/modules/repository-gcs/build.gradle +++ b/modules/repository-gcs/build.gradle @@ -1,7 +1,7 @@ import org.apache.tools.ant.filters.ReplaceTokens import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.RestIntegTestTask -import org.elasticsearch.gradle.internal.test.rest.InternalYamlRestTestPlugin +import org.elasticsearch.gradle.internal.test.rest.LegacyYamlRestTestPlugin import org.elasticsearch.gradle.internal.test.InternalClusterTestPlugin import java.nio.file.Files @@ -16,7 +16,7 @@ import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE * in 
compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' apply plugin: 'elasticsearch.internal-test-artifact-base' @@ -26,36 +26,36 @@ esplugin { } dependencies { - api 'com.google.cloud:google-cloud-storage:1.118.1' - api 'com.google.cloud:google-cloud-core:2.0.2' - api 'com.google.cloud:google-cloud-core-http:2.0.2' - runtimeOnly 'com.google.guava:guava:30.1.1-jre' + api 'com.google.cloud:google-cloud-storage:2.13.1' + api 'com.google.cloud:google-cloud-core:2.8.28' + api 'com.google.cloud:google-cloud-core-http:2.8.28' + runtimeOnly 'com.google.guava:guava:31.1-jre' runtimeOnly 'com.google.guava:failureaccess:1.0.1' api "commons-logging:commons-logging:${versions.commonslogging}" api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" - api 'com.google.api:api-common:2.2.1' - api 'com.google.api:gax:2.0.0' - api 'org.threeten:threetenbp:1.5.1' - api 'com.google.protobuf:protobuf-java-util:3.17.3' - api 'com.google.protobuf:protobuf-java:3.21.1' - api 'com.google.code.gson:gson:2.8.9' - api 'com.google.api.grpc:proto-google-common-protos:2.3.2' - api 'com.google.api.grpc:proto-google-iam-v1:1.0.14' - api 'com.google.auth:google-auth-library-credentials:1.0.0' - api 'com.google.auth:google-auth-library-oauth2-http:1.0.0' - api 'com.google.oauth-client:google-oauth-client:1.34.1' - api 'com.google.api-client:google-api-client:1.35.1' - api 'com.google.http-client:google-http-client:1.39.2' - api 'com.google.http-client:google-http-client-gson:1.39.2' - api 'com.google.http-client:google-http-client-appengine:1.39.2' - api 'com.google.http-client:google-http-client-jackson2:1.39.2' + api 'com.google.api:api-common:2.3.1' + api 'com.google.api:gax:2.20.1' + api 'org.threeten:threetenbp:1.6.5' + api "com.google.protobuf:protobuf-java-util:${versions.protobuf}" + api "com.google.protobuf:protobuf-java:${versions.protobuf}" + api 'com.google.code.gson:gson:2.10' + api 'com.google.api.grpc:proto-google-common-protos:2.9.6' + api 'com.google.api.grpc:proto-google-iam-v1:1.6.2' + api 'com.google.auth:google-auth-library-credentials:1.11.0' + api 'com.google.auth:google-auth-library-oauth2-http:1.11.0' + api "com.google.oauth-client:google-oauth-client:${versions.google_oauth_client}" + api 'com.google.api-client:google-api-client:2.1.1' + api 'com.google.http-client:google-http-client:1.42.3' + api 'com.google.http-client:google-http-client-gson:1.42.3' + api 'com.google.http-client:google-http-client-appengine:1.42.3' + api 'com.google.http-client:google-http-client-jackson2:1.42.3' api "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - api 'com.google.api:gax-httpjson:0.85.0' - api 'io.grpc:grpc-context:1.39.0' - api 'io.opencensus:opencensus-api:0.28.0' - api 'io.opencensus:opencensus-contrib-http-util:0.28.0' - api 'com.google.apis:google-api-services-storage:v1-rev20210127-1.32.1' + api 'com.google.api:gax-httpjson:0.105.1' + api 'io.grpc:grpc-context:1.49.2' + api 'io.opencensus:opencensus-api:0.31.1' + api 'io.opencensus:opencensus-contrib-http-util:0.31.1' + api 'com.google.apis:google-api-services-storage:v1-rev20220705-2.0.0' testImplementation project(':test:fixtures:gcs-fixture') } @@ -180,10 +180,22 @@ tasks.named("thirdPartyAudit").configure { 'org.apache.http.protocol.HttpContext', 
'org.apache.http.protocol.HttpProcessor', 'org.apache.http.protocol.HttpRequestExecutor', + // commons-logging provided dependencies 'javax.servlet.ServletContextEvent', 'javax.servlet.ServletContextListener' ) + + + if(BuildParams.graalVmRuntime == false) { + ignoreMissingClasses( + 'org.graalvm.nativeimage.hosted.Feature', + 'org.graalvm.nativeimage.hosted.Feature$BeforeAnalysisAccess', + 'org.graalvm.nativeimage.hosted.Feature$DuringAnalysisAccess', + 'org.graalvm.nativeimage.hosted.Feature$FeatureAccess', + 'org.graalvm.nativeimage.hosted.RuntimeReflection' + ) + } } boolean useFixture = false @@ -271,7 +283,7 @@ def largeBlobYamlRestTest = tasks.register("largeBlobYamlRestTest", RestIntegTes dependsOn "createServiceAccountFile" } SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); - SourceSet yamlRestTestSourceSet = sourceSets.getByName(InternalYamlRestTestPlugin.SOURCE_SET_NAME) + SourceSet yamlRestTestSourceSet = sourceSets.getByName(LegacyYamlRestTestPlugin.SOURCE_SET_NAME) setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs()) setClasspath(yamlRestTestSourceSet.getRuntimeClasspath()) @@ -323,7 +335,7 @@ testClusters.matching { if (useFixture) { tasks.register("yamlRestTestApplicationDefaultCredentials", RestIntegTestTask.class) { SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); - SourceSet yamlRestTestSourceSet = sourceSets.getByName(InternalYamlRestTestPlugin.SOURCE_SET_NAME) + SourceSet yamlRestTestSourceSet = sourceSets.getByName(LegacyYamlRestTestPlugin.SOURCE_SET_NAME) setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs()) setClasspath(yamlRestTestSourceSet.getRuntimeClasspath()) } diff --git a/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index d2cd3dc6a2b2..8a4fb8eb41bd 100644 --- a/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -10,10 +10,12 @@ import fixture.gcs.FakeOAuth2HttpHandler; import fixture.gcs.GoogleCloudStorageHttpHandler; +import fixture.gcs.TestUtils; import com.google.api.gax.retrying.RetrySettings; import com.google.cloud.http.HttpTransportOptions; import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.StorageRetryStrategy; import com.sun.net.httpserver.Headers; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; @@ -226,6 +228,7 @@ StorageOptions createStorageOptions( ) { StorageOptions options = super.createStorageOptions(gcsClientSettings, httpTransportOptions); return options.toBuilder() + .setStorageRetryStrategy(StorageRetryStrategy.getLegacyStorageRetryStrategy()) .setHost(options.getHost()) .setCredentials(options.getCredentials()) .setRetrySettings( diff --git a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index 74184fd0b275..4d29f14c7adb 100644 --- a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ 
b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -29,12 +29,12 @@ import org.elasticsearch.common.blobstore.support.BlobMetadata; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.hash.MessageDigests; -import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.core.Streams; import org.elasticsearch.core.SuppressForbidden; import java.io.ByteArrayInputStream; diff --git a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java index 4743eb71f832..e65f391667d7 100644 --- a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java +++ b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java @@ -19,6 +19,7 @@ import com.google.cloud.http.HttpTransportOptions; import com.google.cloud.storage.Storage; import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.StorageRetryStrategy; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -176,6 +177,7 @@ StorageOptions createStorageOptions( final HttpTransportOptions httpTransportOptions ) { final StorageOptions.Builder storageOptionsBuilder = StorageOptions.newBuilder() + .setStorageRetryStrategy(StorageRetryStrategy.getLegacyStorageRetryStrategy()) .setTransportOptions(httpTransportOptions) .setHeaderProvider(() -> { return Strings.hasLength(gcsClientSettings.getApplicationName()) diff --git a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java index 3a9a4fa99c57..ef38da7eb20e 100644 --- a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java +++ b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java @@ -13,6 +13,7 @@ import com.google.cloud.http.HttpTransportOptions; import com.google.cloud.storage.StorageException; import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.StorageRetryStrategy; import com.sun.net.httpserver.HttpHandler; import org.apache.http.HttpStatus; @@ -60,6 +61,7 @@ import static fixture.gcs.GoogleCloudStorageHttpHandler.getContentRangeLimit; import static fixture.gcs.GoogleCloudStorageHttpHandler.getContentRangeStart; import static fixture.gcs.GoogleCloudStorageHttpHandler.parseMultipartRequestBody; +import static fixture.gcs.TestUtils.createServiceAccount; import static java.nio.charset.StandardCharsets.UTF_8; import static org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase.randomBytes; import static org.elasticsearch.repositories.gcs.GoogleCloudStorageBlobStore.MAX_DELETES_PER_BATCH; @@ -67,7 +69,6 @@ import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.ENDPOINT_SETTING; import static 
org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.READ_TIMEOUT_SETTING; import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.TOKEN_URI_SETTING; -import static org.elasticsearch.repositories.gcs.TestUtils.createServiceAccount; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -140,6 +141,7 @@ StorageOptions createStorageOptions( retrySettingsBuilder.setMaxAttempts(maxRetries + 1); } return options.toBuilder() + .setStorageRetryStrategy(StorageRetryStrategy.getLegacyStorageRetryStrategy()) .setHost(options.getHost()) .setCredentials(options.getCredentials()) .setRetrySettings(retrySettingsBuilder.build()) @@ -203,7 +205,7 @@ public void testWriteBlobWithRetries() throws Exception { assertThat(content.isPresent(), is(true)); assertThat(content.get().v1(), equalTo(blobContainer.path().buildAsString() + "write_blob_max_retries")); if (Objects.deepEquals(bytes, BytesReference.toBytes(content.get().v2()))) { - byte[] response = formatted(""" + byte[] response = Strings.format(""" {"bucket":"bucket","name":"%s"} """, content.get().v1()).getBytes(UTF_8); exchange.getResponseHeaders().add("Content-Type", "application/json"); @@ -216,7 +218,10 @@ public void testWriteBlobWithRetries() throws Exception { } if (randomBoolean()) { if (randomBoolean()) { - Streams.readFully(exchange.getRequestBody(), new byte[randomIntBetween(1, Math.max(1, bytes.length - 1))]); + org.elasticsearch.core.Streams.readFully( + exchange.getRequestBody(), + new byte[randomIntBetween(1, Math.max(1, bytes.length - 1))] + ); } else { Streams.readFully(exchange.getRequestBody()); exchange.sendResponseHeaders(HttpStatus.SC_INTERNAL_SERVER_ERROR, -1); @@ -239,7 +244,7 @@ public void testWriteBlobWithReadTimeouts() { httpServer.createContext("/upload/storage/v1/b/bucket/o", exchange -> { if (randomBoolean()) { if (randomBoolean()) { - Streams.readFully(exchange.getRequestBody(), new byte[randomIntBetween(1, bytes.length - 1)]); + org.elasticsearch.core.Streams.readFully(exchange.getRequestBody(), new byte[randomIntBetween(1, bytes.length - 1)]); } else { Streams.readFully(exchange.getRequestBody()); } @@ -349,7 +354,7 @@ public void testWriteLargeBlob() throws IOException { if (range.equals("bytes */*")) { final int receivedSoFar = bytesReceived.get(); if (receivedSoFar > 0) { - exchange.getResponseHeaders().add("Range", formatted("bytes=0-%d", receivedSoFar)); + exchange.getResponseHeaders().add("Range", Strings.format("bytes=0-%d", receivedSoFar)); } exchange.getResponseHeaders().add("Content-Length", "0"); exchange.sendResponseHeaders(308 /* Resume Incomplete */, -1); @@ -371,7 +376,7 @@ public void testWriteLargeBlob() throws IOException { exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1); return; } else { - exchange.getResponseHeaders().add("Range", String.format(Locale.ROOT, "bytes=%d/%d", rangeStart, rangeEnd)); + exchange.getResponseHeaders().add("Range", Strings.format("bytes=%d/%d", rangeStart, rangeEnd)); exchange.getResponseHeaders().add("Content-Length", "0"); exchange.sendResponseHeaders(308 /* Resume Incomplete */, -1); return; diff --git a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java index f8fe2b5919d9..73eb4a48a21d 100644 --- 
a/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java +++ b/modules/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.test.ESTestCase; @@ -260,7 +261,7 @@ private static Tuple randomCredential(final S credentialBuilder.setPrivateKeyId("private_key_id_" + clientName); credentialBuilder.setScopes(Collections.singleton(StorageScopes.DEVSTORAGE_FULL_CONTROL)); final String encodedPrivateKey = Base64.getEncoder().encodeToString(keyPair.getPrivate().getEncoded()); - final String serviceAccount = formatted(""" + final String serviceAccount = Strings.format(""" { "type": "service_account", "project_id": "project_id_%s", diff --git a/modules/repository-s3/build.gradle b/modules/repository-s3/build.gradle index a31bdf7ae9b7..892c8c42903e 100644 --- a/modules/repository-s3/build.gradle +++ b/modules/repository-s3/build.gradle @@ -1,7 +1,7 @@ import org.apache.tools.ant.filters.ReplaceTokens import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.RestIntegTestTask -import org.elasticsearch.gradle.internal.test.rest.InternalYamlRestTestPlugin +import org.elasticsearch.gradle.internal.test.rest.LegacyYamlRestTestPlugin import org.elasticsearch.gradle.internal.test.InternalClusterTestPlugin import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE @@ -13,7 +13,7 @@ import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' apply plugin: 'elasticsearch.internal-test-artifact-base' @@ -121,7 +121,7 @@ String s3ECSBasePath = System.getenv("amazon_s3_base_path_ecs") String s3STSBucket = System.getenv("amazon_s3_bucket_sts") String s3STSBasePath = System.getenv("amazon_s3_base_path_sts") -boolean s3DisableChunkedEncoding = (new Random(Long.parseUnsignedLong(BuildParams.testSeed.tokenize(':').get(0), 16))).nextBoolean() +boolean s3DisableChunkedEncoding = BuildParams.random.nextBoolean() // If all these variables are missing then we are testing against the internal fixture instead, which has the following // credentials hard-coded in. @@ -244,7 +244,7 @@ if (useFixture) { tasks.register("yamlRestTestMinio", RestIntegTestTask) { description = "Runs REST tests using the Minio repository." SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); - SourceSet yamlRestTestSourceSet = sourceSets.getByName(InternalYamlRestTestPlugin.SOURCE_SET_NAME) + SourceSet yamlRestTestSourceSet = sourceSets.getByName(LegacyYamlRestTestPlugin.SOURCE_SET_NAME) setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs()) setClasspath(yamlRestTestSourceSet.getRuntimeClasspath()) @@ -272,7 +272,7 @@ if (useFixture) { tasks.register("yamlRestTestECS", RestIntegTestTask.class) { description = "Runs tests using the ECS repository." 
SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); - SourceSet yamlRestTestSourceSet = sourceSets.getByName(InternalYamlRestTestPlugin.SOURCE_SET_NAME) + SourceSet yamlRestTestSourceSet = sourceSets.getByName(LegacyYamlRestTestPlugin.SOURCE_SET_NAME) setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs()) setClasspath(yamlRestTestSourceSet.getRuntimeClasspath()) systemProperty 'tests.rest.blacklist', [ @@ -298,7 +298,7 @@ if (useFixture) { tasks.register("yamlRestTestSTS", RestIntegTestTask.class) { description = "Runs tests with the STS (Secure Token Service)" SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); - SourceSet yamlRestTestSourceSet = sourceSets.getByName(InternalYamlRestTestPlugin.SOURCE_SET_NAME) + SourceSet yamlRestTestSourceSet = sourceSets.getByName(LegacyYamlRestTestPlugin.SOURCE_SET_NAME) setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs()) setClasspath(yamlRestTestSourceSet.getRuntimeClasspath()) systemProperty 'tests.rest.blacklist', [ diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index 79698eb4e282..3d4d426944db 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -15,7 +15,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.RepositoryMetadata; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Setting; @@ -102,7 +102,7 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationDeciders allocationDeciders + AllocationService allocationService ) { service.set(s3Service(environment)); this.service.get().refreshAndClearCache(S3ClientSettings.load(settings)); diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java index f151b5f82785..04c47bb9b55e 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java @@ -13,6 +13,7 @@ import org.apache.logging.log4j.LogManager; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.env.Environment; import org.elasticsearch.mocksocket.MockHttpServer; @@ -53,7 +54,7 @@ public void testCreateWebIdentityTokenCredentialsProvider() throws Exception { assertEquals(ROLE_NAME, params.get("RoleSessionName")); exchange.getResponseHeaders().add("Content-Type", "text/xml; charset=UTF-8"); - byte[] response = formatted( + byte[] response = 
Strings.format( """ diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java index de9842462d81..158af7b426ab 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -190,7 +190,10 @@ public void testWriteBlobWithRetries() throws Exception { if (randomBoolean()) { if (randomBoolean()) { - Streams.readFully(exchange.getRequestBody(), new byte[randomIntBetween(1, Math.max(1, bytes.length - 1))]); + org.elasticsearch.core.Streams.readFully( + exchange.getRequestBody(), + new byte[randomIntBetween(1, Math.max(1, bytes.length - 1))] + ); } else { Streams.readFully(exchange.getRequestBody()); exchange.sendResponseHeaders( @@ -222,7 +225,7 @@ public void testWriteBlobWithReadTimeouts() { httpServer.createContext(downloadStorageEndpoint(blobContainer, "write_blob_timeout"), exchange -> { if (randomBoolean()) { if (randomBoolean()) { - Streams.readFully(exchange.getRequestBody(), new byte[randomIntBetween(1, bytes.length - 1)]); + org.elasticsearch.core.Streams.readFully(exchange.getRequestBody(), new byte[randomIntBetween(1, bytes.length - 1)]); } else { Streams.readFully(exchange.getRequestBody()); } @@ -317,7 +320,10 @@ public void testWriteLargeBlob() throws Exception { // sends an error back or let the request time out if (useTimeout == false) { if (randomBoolean() && contentLength > 0) { - Streams.readFully(exchange.getRequestBody(), new byte[randomIntBetween(1, Math.toIntExact(contentLength - 1))]); + org.elasticsearch.core.Streams.readFully( + exchange.getRequestBody(), + new byte[randomIntBetween(1, Math.toIntExact(contentLength - 1))] + ); } else { Streams.readFully(exchange.getRequestBody()); exchange.sendResponseHeaders( @@ -412,7 +418,10 @@ public void testWriteLargeBlobStreaming() throws Exception { // sends an error back or let the request time out if (useTimeout == false) { if (randomBoolean() && contentLength > 0) { - Streams.readFully(exchange.getRequestBody(), new byte[randomIntBetween(1, Math.toIntExact(contentLength - 1))]); + org.elasticsearch.core.Streams.readFully( + exchange.getRequestBody(), + new byte[randomIntBetween(1, Math.toIntExact(contentLength - 1))] + ); } else { Streams.readFully(exchange.getRequestBody()); exchange.sendResponseHeaders( diff --git a/modules/repository-url/build.gradle b/modules/repository-url/build.gradle index 164c64ad1f61..7b671802f3a2 100644 --- a/modules/repository-url/build.gradle +++ b/modules/repository-url/build.gradle @@ -8,8 +8,8 @@ import org.elasticsearch.gradle.PropertyNormalization -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' apply plugin: 'elasticsearch.test.fixtures' diff --git a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/http/URLHttpClient.java b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/http/URLHttpClient.java index 1d8f18390169..580348564b07 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/http/URLHttpClient.java +++ 
b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/http/URLHttpClient.java @@ -21,9 +21,9 @@ import org.apache.http.ssl.SSLContexts; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.io.Streams; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Streams; import org.elasticsearch.rest.RestStatus; import java.io.Closeable; diff --git a/modules/repository-url/src/main/java/org/elasticsearch/plugin/repository/url/URLRepositoryPlugin.java b/modules/repository-url/src/main/java/org/elasticsearch/plugin/repository/url/URLRepositoryPlugin.java index ff7218a9e57f..ace6555860f7 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/plugin/repository/url/URLRepositoryPlugin.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/plugin/repository/url/URLRepositoryPlugin.java @@ -11,7 +11,7 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.blobstore.url.http.URLHttpClient; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -88,7 +88,7 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationDeciders allocationDeciders + AllocationService allocationService ) { final URLHttpClient.Factory apacheURLHttpClientFactory = new URLHttpClient.Factory(); diff --git a/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/AbstractURLBlobStoreTests.java b/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/AbstractURLBlobStoreTests.java index dfa4d310c96b..88a248453cc9 100644 --- a/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/AbstractURLBlobStoreTests.java +++ b/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/AbstractURLBlobStoreTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Streams; +import org.elasticsearch.core.Strings; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -57,7 +58,7 @@ public void testNoBlobFound() throws IOException { ignored.read(); fail("Should have thrown NoSuchFileException exception"); } catch (NoSuchFileException e) { - assertEquals(formatted("blob object [%s] not found", incorrectBlobName), e.getMessage()); + assertEquals(Strings.format("blob object [%s] not found", incorrectBlobName), e.getMessage()); } } } diff --git a/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/http/RetryingHttpInputStreamTests.java b/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/http/RetryingHttpInputStreamTests.java index 2c8e7312def8..d1425e56072e 100644 --- a/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/http/RetryingHttpInputStreamTests.java +++ b/modules/repository-url/src/test/java/org/elasticsearch/common/blobstore/url/http/RetryingHttpInputStreamTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.io.Streams; import 
org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Strings; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; @@ -17,7 +18,6 @@ import java.net.URI; import java.util.Iterator; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; @@ -44,7 +44,7 @@ public void testUnderlyingInputStreamIsAbortedAfterAFailureAndRetries() throws E when(secondHttpResponseInputStream.read(any(), anyInt(), anyInt())).thenReturn(blobSize - firstChunkSize).thenReturn(-1); final Map secondResponseHeaders = Map.of( "Content-Range", - String.format(Locale.ROOT, "bytes %d-%d/%d", firstChunkSize, blobSize - 1, blobSize) + Strings.format("bytes %d-%d/%d", firstChunkSize, blobSize - 1, blobSize) ); final List responses = List.of( diff --git a/modules/runtime-fields-common/build.gradle b/modules/runtime-fields-common/build.gradle index 5a2d268cf7a4..c4db67a89d36 100644 --- a/modules/runtime-fields-common/build.gradle +++ b/modules/runtime-fields-common/build.gradle @@ -7,8 +7,8 @@ */ apply plugin: 'elasticsearch.validate-rest-spec' -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' esplugin { description 'Module for runtime fields features and extensions that have large dependencies' diff --git a/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/RuntimeFieldsCommonPlugin.java b/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/RuntimeFieldsCommonPlugin.java index 5629406a662e..bc0b39e98999 100644 --- a/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/RuntimeFieldsCommonPlugin.java +++ b/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/RuntimeFieldsCommonPlugin.java @@ -10,7 +10,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Setting; @@ -76,7 +76,7 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationDeciders allocationDeciders + AllocationService allocationService ) { grokHelper.finishInitializing(threadPool); return List.of(); diff --git a/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/114_composite_errors.yml b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/114_composite_errors.yml new file mode 100644 index 000000000000..cfaafedd943a --- /dev/null +++ b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/114_composite_errors.yml @@ -0,0 +1,124 @@ +--- +setup: + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + runtime: + rtf: + type: composite + on_script_error: continue + script: + source: | + if (doc["message"].value.equals("fail")) throw new Exception("error"); + emit('msg', doc['message'].value); + fields: + msg: + type: keyword + rtf_strict: + type: 
composite + on_script_error: fail + script: + source: | + if (doc["message"].value.equals("fail")) throw new Exception("error"); + emit('msg', doc['message'].value); + fields: + msg: + type: keyword + properties: + timestamp: + type: date + message: + type: keyword + + - do: + bulk: + index: test + refresh: true + body: | + {"index":{}} + {"timestamp": "1998-04-30T14:30:17-05:00", "message" : "this is okay"} + {"index":{}} + {"timestamp": "1998-04-30T14:30:53-05:00", "message" : "fail"} + +--- +"query with continue on error": + - do: + search: + index: test + body: + query: + term: + rtf.msg: "this is okay" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._source.message: "this is okay"} + +--- +"query with fail on error": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: test + body: + query: + term: + rtf_strict.msg: "this is okay" + +--- +"query with search time field": + - do: + search: + index: test + body: + query: + term: + rtf_search.msg: "this is okay" + runtime_mappings: + rtf_search: + type: composite + on_script_error: continue + script: + source: | + if (doc["message"].value.equals("fail")) throw new Exception("error"); + emit('msg', doc['message'].value); + fields: + msg: + type: keyword + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._source.message: "this is okay"} + +--- +fetch: + - do: + search: + index: test + body: + sort: timestamp + fields: + - message + - rtf.msg + - match: {hits.total.value: 2} + - match: {hits.hits.0.fields.message: ["this is okay"] } + - match: {hits.hits.0.fields.rtf\.msg: ["this is okay"] } + - match: {hits.hits.1.fields.message: ["fail"] } + - is_false: hits.hits.1.fields.rtf.msg + +--- +"terms agg": + - do: + search: + index: test + body: + aggs: + messages: + terms: + field: rtf.msg + - match: { hits.total.value: 2} + - length: { aggregations.messages.buckets: 1 } + - match: { aggregations.messages.buckets.0.key: "this is okay" } + - match: { aggregations.messages.buckets.0.doc_count: 1 } diff --git a/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/14_keyword_errors.yml b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/14_keyword_errors.yml new file mode 100644 index 000000000000..d21259ab551a --- /dev/null +++ b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/14_keyword_errors.yml @@ -0,0 +1,216 @@ +--- +setup: + - do: + indices.create: + index: testindex + body: + settings: + number_of_shards: 1 + mappings: + runtime: + first_char: + type: keyword + script: | + emit(doc['name'].value.substring(0,1)); + on_script_error: continue + first_char_strict_error: + type: keyword + script: | + emit(doc['name'].value.substring(0,1)); + on_script_error: fail + properties: + name: + type: keyword + + - do: + bulk: + index: testindex + refresh: true + body: | + {"index":{}} + {"name": "foo"} + {"index":{}} + {"name": ""} + +--- +"Query rtf with on_script_error continue": + - do: + search: + index: testindex + body: + query: + match: + first_char: "f" + fields: [ name, first_char ] + - match: { hits.total.value: 1 } + - match: { hits.hits.0.fields.name: [ foo ] } + - match: { hits.hits.0.fields.first_char: [ f ] } + +--- +"Query rtf with on_script_error fail": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: + match: + first_char_strict_error: "f" + fields: [ name, first_char_strict_error ] + +--- +"Aggregate on 
rtf with on_script_error continue": + - do: + search: + index: testindex + body: + aggs: + firstchar: + terms: + field: first_char + - length: { aggregations.firstchar.buckets: 1 } + - match: { aggregations.firstchar.buckets.0.key: "f" } + +--- +"Aggregate on rtf with on_script_error fail": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + aggs: + firstchar: + terms: + field: first_char_strict_error + +--- +"Fields retrieval with ignoring error": + - do: + search: + index: testindex + body: + query: { match_all: { } } + fields: [ name, first_char ] + - match: { hits.total.value: 2 } + - match: { hits.hits.0.fields.name: [ foo ] } + - match: { hits.hits.0.fields.first_char: [ f ] } + - match: { hits.hits.1.fields.name: [ "" ] } + - is_false: hits.hits.1.fields.first_char + +--- +"Fields retrieval with failing on error": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: { match_all: { } } + fields: [ name, first_char_strict_error ] + +--- +"Sorting with ignoring error": + - do: + search: + index: testindex + body: + query: { match_all: { } } + fields: [ name ] + sort: first_char + - match: { hits.total.value: 2 } + - match: { hits.hits.0.fields.name: [ foo ] } + - match: { hits.hits.1.fields.name: [ "" ] } + +--- +"Sorting with with failing on error": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: { match_all: { } } + fields: [ name ] + sort: first_char_strict_error + +--- +"Query search time rtf with on_script_error continue": + - do: + search: + index: testindex + body: + query: + match: + first_char_search: "f" + fields: [ name, first_char_search ] + runtime_mappings: + first_char_search: + type: keyword + script: | + emit(doc['name'].value.substring(0,1)); + on_script_error: continue + + - match: { hits.total.value: 1 } + - match: { hits.hits.0.fields.name: [ foo ] } + - match: { hits.hits.0.fields.first_char_search: [ f ] } + +--- +"Query search time rtf with on_script_error fail": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: + match: + first_char_search: "f" + fields: [ name, first_char_search ] + runtime_mappings: + first_char_search: + type: keyword + script: | + emit(doc['name'].value.substring(0,1)); + on_script_error: fail + +--- +"Change error behaviour for lenient runtime field": + + - do: + indices.put_mapping: + index: testindex + body: + runtime: + first_char_variant: + type: keyword + script: | + emit(doc['name'].value.substring(0,1)); + + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: + match: + first_char_variant: "f" + + - do: + indices.put_mapping: + index: testindex + body: + runtime: + first_char_variant: + type: keyword + script: | + emit(doc['name'].value.substring(0,1)); + on_script_error: continue + + - do: + search: + index: testindex + body: + query: + match: + first_char_variant: "f" + fields: [ name, first_char_variant ] + - match: { hits.total.value: 1 } + - match: { hits.hits.0.fields.name: [ foo ] } + - match: { hits.hits.0.fields.first_char_variant: [ f ] } diff --git a/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/24_long_errors.yml b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/24_long_errors.yml new file mode 100644 index 000000000000..a832ff78bbed --- /dev/null +++ 
b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/24_long_errors.yml @@ -0,0 +1,176 @@ +--- +setup: + - do: + indices.create: + index: testindex + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + runtime: + rtf: + type: long + script: | + if(doc['name'].value.equals("")) throw new Exception("empty"); + emit(doc['name'].value.length()); + on_script_error: continue + rtf_strict_error: + type: long + script: | + if(doc['name'].value.equals("")) throw new Exception("empty"); + emit(doc['name'].value.length()); + on_script_error: fail + properties: + name: + type: keyword + + - do: + bulk: + index: testindex + refresh: true + body: | + {"index":{}} + {"name": "foo"} + {"index":{}} + {"name": ""} + +--- +"Query rtf with on_script_error continue": + - do: + search: + index: testindex + body: + query: + match: + rtf: 3 + fields: [ name, rtf ] + - match: { hits.total.value: 1 } + - match: { hits.hits.0.fields.name: [ foo ] } + - match: { hits.hits.0.fields.rtf: [ 3 ] } + +--- +"Query rtf with on_script_error fail": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: + match: + rtf_strict_error: 3 + fields: [ name, rtf_strict_error ] + +--- +"Aggregate on rtf with on_script_error continue": + - do: + search: + index: testindex + body: + aggs: + firstchar: + terms: + field: rtf + - length: { aggregations.firstchar.buckets: 1 } + - match: { aggregations.firstchar.buckets.0.key: 3 } + +--- +"Aggregate on rtf with on_script_error fail": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + aggs: + firstchar: + terms: + field: rtf_strict_error + +--- +"Fields retrieval with ignoring error": + - do: + search: + index: testindex + body: + query: { match_all: { } } + fields: [ name, rtf ] + - match: { hits.total.value: 2 } + - match: { hits.hits.0.fields.name: [ foo ] } + - match: { hits.hits.0.fields.rtf: [ 3 ] } + - match: { hits.hits.1.fields.name: [ "" ] } + - is_false: hits.hits.1.fields.rtf + +--- +"Fields retrieval with failing on error": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: { match_all: { } } + fields: [ name, rtf_strict_error ] + +--- +"Sorting with ignoring error": + - do: + search: + index: testindex + body: + query: { match_all: { } } + fields: [ name ] + sort: rtf + - match: { hits.total.value: 2 } + - match: { hits.hits.0.fields.name: [ foo ] } + - match: { hits.hits.1.fields.name: [ "" ] } + +--- +"Sorting with with failing on error": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: { match_all: { } } + fields: [ name ] + sort: rtf_strict_error + +--- +"Query search time rtf with on_script_error continue": + - do: + search: + index: testindex + body: + query: + match: + rtf_search: 3 + fields: [ name, rtf_search ] + runtime_mappings: + rtf_search: + type: long + script: | + if(doc['name'].value.equals("")) throw new Exception("empty"); + emit(doc['name'].value.length()); + on_script_error: continue + + - match: { hits.total.value: 1 } + - match: { hits.hits.0.fields.name: [ foo ] } + - match: { hits.hits.0.fields.rtf_search: [ 3 ] } + +--- +"Query search time rtf with on_script_error fail": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: + match: + rtf_search: 3 + fields: [ name, rtf_search ] + runtime_mappings: + rtf_search: + 
type: long + script: | + if(doc['name'].value.equals("")) throw new Exception("empty"); + emit(doc['name'].value.length()); + on_script_error: fail diff --git a/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/34_double_errors.yml b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/34_double_errors.yml new file mode 100644 index 000000000000..2371dd2f3f49 --- /dev/null +++ b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/34_double_errors.yml @@ -0,0 +1,176 @@ +--- +setup: + - do: + indices.create: + index: testindex + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + runtime: + rtf: + type: double + script: | + if(doc['name'].value.equals("")) throw new Exception("empty"); + emit(doc['name'].value.length()); + on_script_error: continue + rtf_strict_error: + type: double + script: | + if(doc['name'].value.equals("")) throw new Exception("empty"); + emit(doc['name'].value.length()); + on_script_error: fail + properties: + name: + type: keyword + + - do: + bulk: + index: testindex + refresh: true + body: | + {"index":{}} + {"name": "foo"} + {"index":{}} + {"name": ""} + +--- +"Query rtf with on_script_error continue": + - do: + search: + index: testindex + body: + query: + match: + rtf: 3 + fields: [ name, rtf ] + - match: { hits.total.value: 1 } + - match: { hits.hits.0.fields.name: [ foo ] } + - match: { hits.hits.0.fields.rtf: [ 3.0 ] } + +--- +"Query rtf with on_script_error fail": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: + match: + rtf_strict_error: 3 + fields: [ name, rtf_strict_error ] + +--- +"Aggregate on rtf with on_script_error continue": + - do: + search: + index: testindex + body: + aggs: + firstchar: + terms: + field: rtf + - length: { aggregations.firstchar.buckets: 1 } + - match: { aggregations.firstchar.buckets.0.key: 3.0 } + +--- +"Aggregate on rtf with on_script_error fail": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + aggs: + firstchar: + terms: + field: rtf_strict_error + +--- +"Fields retrieval with ignoring error": + - do: + search: + index: testindex + body: + query: { match_all: { } } + fields: [ name, rtf ] + - match: { hits.total.value: 2 } + - match: { hits.hits.0.fields.name: [ foo ] } + - match: { hits.hits.0.fields.rtf: [ 3.0 ] } + - match: { hits.hits.1.fields.name: [ "" ] } + - is_false: hits.hits.1.fields.rtf + +--- +"Fields retrieval with failing on error": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: { match_all: { } } + fields: [ name, rtf_strict_error ] + +--- +"Sorting with ignoring error": + - do: + search: + index: testindex + body: + query: { match_all: { } } + fields: [ name ] + sort: rtf + - match: { hits.total.value: 2 } + - match: { hits.hits.0.fields.name: [ foo ] } + - match: { hits.hits.1.fields.name: [ "" ] } + +--- +"Sorting with with failing on error": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: { match_all: { } } + fields: [ name ] + sort: rtf_strict_error + +--- +"Query search time rtf with on_script_error continue": + - do: + search: + index: testindex + body: + query: + match: + rtf_search: 3 + fields: [ name, rtf_search ] + runtime_mappings: + rtf_search: + type: double + script: | + if(doc['name'].value.equals("")) throw new 
Exception("empty"); + emit(doc['name'].value.length()); + on_script_error: continue + + - match: { hits.total.value: 1 } + - match: { hits.hits.0.fields.name: [ foo ] } + - match: { hits.hits.0.fields.rtf_search: [ 3.0 ] } + +--- +"Query search time rtf with on_script_error fail": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: + match: + rtf_search: 3 + fields: [ name, rtf_search ] + runtime_mappings: + rtf_search: + type: double + script: | + if(doc['name'].value.equals("")) throw new Exception("empty"); + emit(doc['name'].value.length()); + on_script_error: fail diff --git a/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/44_date_errors.yml b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/44_date_errors.yml new file mode 100644 index 000000000000..e38de91c25d8 --- /dev/null +++ b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/44_date_errors.yml @@ -0,0 +1,184 @@ +--- +setup: + - do: + indices.create: + index: testindex + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + runtime: + rtf: + type: date + format: yyyy-MM-dd + script: | + if(doc['millis_since_epoch'].value < 0) throw new Exception("date before 1970"); + emit(doc['millis_since_epoch'].value); + on_script_error: continue + rtf_strict_error: + type: date + format: yyyy-MM-dd + script: | + if(doc['millis_since_epoch'].value < 0) throw new Exception("date before 1970"); + emit(doc['millis_since_epoch'].value); + on_script_error: fail + properties: + millis_since_epoch: + type: long + + - do: + bulk: + index: testindex + refresh: true + body: | + {"index":{}} + {"millis_since_epoch": 1671033474411} + {"index":{}} + {"millis_since_epoch": -1} + +--- +"Query rtf with on_script_error continue": + - do: + search: + index: testindex + body: + query: + range: + rtf: + gte: "2022-12-14" + fields: [ millis_since_epoch, rtf ] + - match: { hits.total.value: 1 } + - match: { hits.hits.0.fields.millis_since_epoch: [ 1671033474411 ] } + - match: { hits.hits.0.fields.rtf: [ "2022-12-14" ] } + +--- +"Query rtf with on_script_error fail": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: + range: + rtf_strict_error: + gte: "2022-12-14" + fields: [ millis_since_epoch, rtf_strict_error ] + +--- +"Aggregate on rtf with on_script_error continue": + - do: + search: + index: testindex + body: + aggs: + firstchar: + terms: + field: rtf + - length: { aggregations.firstchar.buckets: 1 } + - match: { aggregations.firstchar.buckets.0.key_as_string: "2022-12-14" } + +--- +"Aggregate on rtf with on_script_error fail": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + aggs: + firstchar: + terms: + field: rtf_strict_error + +--- +"Fields retrieval with ignoring error": + - do: + search: + index: testindex + body: + query: { match_all: { } } + fields: [ millis_since_epoch, rtf ] + - match: { hits.total.value: 2 } + - match: { hits.hits.0.fields.millis_since_epoch: [ 1671033474411 ] } + - match: { hits.hits.0.fields.rtf: [ "2022-12-14" ] } + - match: { hits.hits.1.fields.millis_since_epoch: [ -1 ] } + - is_false: hits.hits.1.fields.rtf + +--- +"Fields retrieval with failing on error": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: { match_all: { } } + fields: [ 
millis_since_epoch, rtf_strict_error ] + +--- +"Sorting with ignoring error": + - do: + search: + index: testindex + body: + query: { match_all: { } } + fields: [ millis_since_epoch ] + sort: rtf + - match: { hits.total.value: 2 } + - match: { hits.hits.0.fields.millis_since_epoch: [ 1671033474411 ] } + - match: { hits.hits.1.fields.millis_since_epoch: [ -1 ] } + +--- +"Sorting with with failing on error": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: { match_all: { } } + fields: [ millis_since_epoch ] + sort: rtf_strict_error + +--- +"Query search time rtf with on_script_error continue": + - do: + search: + index: testindex + body: + query: + range: + rtf_search: + gte: "2022-12-14" + fields: [ millis_since_epoch, rtf_search ] + runtime_mappings: + rtf_search: + type: date + format: yyyy-MM-dd + script: | + if(doc['millis_since_epoch'].value < 0) throw new Exception("date before 1970"); + emit(doc['millis_since_epoch'].value); + on_script_error: continue + + - match: { hits.total.value: 1 } + - match: { hits.hits.0.fields.millis_since_epoch: [ 1671033474411 ] } + - match: { hits.hits.0.fields.rtf_search: [ "2022-12-14" ] } + +--- +"Query search time rtf with on_script_error fail": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: + range: + rtf_search: + gte: "2022-12-14" + fields: [ millis_since_epoch, rtf_search ] + runtime_mappings: + rtf_search: + type: date + format: yyyy-MM-dd + script: | + if(doc['millis_since_epoch'].value < 0) throw new Exception("date before 1970"); + emit(doc['millis_since_epoch'].value); + on_script_error: fail diff --git a/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/54_ip_errors.yml b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/54_ip_errors.yml new file mode 100644 index 000000000000..1dfbfa6117a6 --- /dev/null +++ b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/54_ip_errors.yml @@ -0,0 +1,177 @@ +--- +setup: + - do: + indices.create: + index: testindex + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + runtime: + rtf: + type: ip + script: | + if(doc['ip_string'].value.length() <= 0) throw new Exception("empty"); + emit(doc['ip_string'].value); + on_script_error: continue + rtf_strict_error: + type: ip + script: | + if(doc['ip_string'].value.length() <= 0) throw new Exception("empty"); + emit(doc['ip_string'].value); + on_script_error: fail + properties: + ip_string: + type: keyword + + - do: + bulk: + index: testindex + refresh: true + body: | + {"index":{}} + {"ip_string": "192.68.0.1"} + {"index":{}} + {"ip_string": ""} + +--- +"Query rtf with on_script_error continue": + - do: + search: + index: testindex + body: + query: + term: + rtf: + 192.68.0.1 + fields: [ ip_string, rtf ] + - match: { hits.total.value: 1 } + - match: { hits.hits.0.fields.ip_string: [ "192.68.0.1" ] } + - match: { hits.hits.0.fields.rtf: [ 192.68.0.1 ] } + +--- +"Query rtf with on_script_error fail": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: + term: + rtf_strict_error: 192.68.0.1 + fields: [ ip_string, rtf_strict_error ] + +--- +"Aggregate on rtf with on_script_error continue": + - do: + search: + index: testindex + body: + aggs: + rtf: + terms: + field: rtf + - length: { aggregations.rtf.buckets: 1 } + - match: { 
aggregations.rtf.buckets.0.key: 192.68.0.1 } + +--- +"Aggregate on rtf with on_script_error fail": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + aggs: + rtf: + terms: + field: rtf_strict_error + +--- +"Fields retrieval with ignoring error": + - do: + search: + index: testindex + body: + query: { match_all: { } } + fields: [ ip_string, rtf ] + - match: { hits.total.value: 2 } + - match: { hits.hits.0.fields.ip_string: [ "192.68.0.1" ] } + - match: { hits.hits.0.fields.rtf: [ "192.68.0.1" ] } + - match: { hits.hits.1.fields.ip_string: [ "" ] } + - is_false: hits.hits.1.fields.rtf + +--- +"Fields retrieval with failing on error": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: { match_all: { } } + fields: [ ip_string, rtf_strict_error ] + +--- +"Sorting with ignoring error": + - do: + search: + index: testindex + body: + query: { match_all: { } } + fields: [ ip_string ] + sort: rtf + - match: { hits.total.value: 2 } + - match: { hits.hits.0.fields.ip_string: [ "192.68.0.1" ] } + - match: { hits.hits.1.fields.ip_string: [ "" ] } + +--- +"Sorting with with failing on error": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: { match_all: { } } + fields: [ ip_string ] + sort: rtf_strict_error + +--- +"Query search time rtf with on_script_error continue": + - do: + search: + index: testindex + body: + query: + match: + rtf_search: "192.68.0.1" + fields: [ ip_string, rtf_search ] + runtime_mappings: + rtf_search: + type: ip + script: | + if(doc['ip_string'].value.length() <= 0) throw new Exception("empty"); + emit(doc['ip_string'].value); + on_script_error: continue + + - match: { hits.total.value: 1 } + - match: { hits.hits.0.fields.ip_string: [ "192.68.0.1" ] } + - match: { hits.hits.0.fields.rtf_search: [ "192.68.0.1" ] } + +--- +"Query search time rtf with on_script_error fail": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: + match: + rtf_search: "192.68.0.1" + fields: [ ip_string, rtf_search ] + runtime_mappings: + rtf_search: + type: ip + script: | + if(doc['ip_string'].value.length() <= 0) throw new Exception("empty"); + emit(doc['ip_string'].value); + on_script_error: fail diff --git a/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/64_boolean_errors.yml b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/64_boolean_errors.yml new file mode 100644 index 000000000000..bfdd15b26994 --- /dev/null +++ b/modules/runtime-fields-common/src/yamlRestTest/resources/rest-api-spec/test/runtime_fields/64_boolean_errors.yml @@ -0,0 +1,184 @@ +--- +setup: + - do: + indices.create: + index: testindex + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + runtime: + rtf: + type: boolean + script: | + if(doc['age'].value < 0) throw new Exception("invalid age"); + emit(doc['age'].value >= 18); + on_script_error: continue + rtf_strict_error: + type: boolean + script: | + if(doc['age'].value <= 0) throw new Exception("invalid age"); + emit(doc['age'].value >=18); + on_script_error: fail + properties: + age: + type: integer + + - do: + bulk: + index: testindex + refresh: true + body: | + {"index":{}} + {"age": 14} + {"index":{}} + {"age": 20} + {"index":{}} + {"age": -1} + +--- +"Query rtf with on_script_error continue": + - do: + search: + index: testindex + body: + 
query: + match: + rtf: + true + fields: [ age, rtf ] + - match: { hits.total.value: 1 } + - match: { hits.hits.0.fields.age: [ 20 ] } + - match: { hits.hits.0.fields.rtf: [ true ] } + +--- +"Query rtf with on_script_error fail": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: + match: + rtf_strict_error: true + fields: [ age, rtf_strict_error ] + +--- +"Aggregate on rtf with on_script_error continue": + - do: + search: + index: testindex + body: + aggs: + rtf: + terms: + field: rtf + order: { "_key": "asc" } + - length: { aggregations.rtf.buckets: 2 } + - match: { aggregations.rtf.buckets.0.key_as_string: "false" } + - match: { aggregations.rtf.buckets.1.key_as_string: "true" } +--- +"Aggregate on rtf with on_script_error fail": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + aggs: + rtf: + terms: + field: rtf_strict_error + +--- +"Fields retrieval with ignoring error": + - do: + search: + index: testindex + body: + query: { match_all: { } } + fields: [ age, rtf ] + sort: { "age": "desc" } + - match: { hits.total.value: 3 } + - match: { hits.hits.0.fields.age: [ 20 ] } + - match: { hits.hits.0.fields.rtf: [ true ] } + - match: { hits.hits.1.fields.age: [ 14 ] } + - match: { hits.hits.1.fields.rtf: [ false ] } + - match: { hits.hits.2.fields.age: [ -1 ] } + - is_false: hits.hits.2.fields.rtf + +--- +"Fields retrieval with failing on error": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: { match_all: { } } + fields: [ age, rtf_strict_error ] + +--- +"Sorting with ignoring error": + - do: + search: + index: testindex + body: + query: { match_all: { } } + fields: [ age ] + sort: rtf + - match: { hits.total.value: 3 } + - match: { hits.hits.0.fields.age: [ 14 ] } + - match: { hits.hits.1.fields.age: [ 20 ] } + - match: { hits.hits.2.fields.age: [ -1 ] } + +--- +"Sorting with with failing on error": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: { match_all: { } } + fields: [ age ] + sort: rtf_strict_error + +--- +"Query search time rtf with on_script_error continue": + - do: + search: + index: testindex + body: + query: + match: + rtf_search: true + fields: [ age, rtf_search ] + runtime_mappings: + rtf_search: + type: boolean + script: | + if(doc['age'].value < 0) throw new Exception("invalid age"); + emit(doc['age'].value >= 18); + on_script_error: continue + + - match: { hits.total.value: 1 } + - match: { hits.hits.0.fields.age: [ 20 ] } + - match: { hits.hits.0.fields.rtf_search: [ true ] } + +--- +"Query search time rtf with on_script_error fail": + - do: + catch: /type=script_exception, reason=runtime error/ + search: + index: testindex + body: + query: + match: + rtf_search: true + fields: [ age, rtf_search ] + runtime_mappings: + rtf_search: + type: boolean + script: | + if(doc['age'].value < 0) throw new Exception("invalid age"); + emit(doc['age'].value >= 18); + on_script_error: fail diff --git a/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java b/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java index f7690700d513..27bd1791dc4d 100644 --- a/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java +++ b/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java @@ -14,7 +14,7 @@ import org.elasticsearch.Build; import org.elasticsearch.client.internal.Client; import 
org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.core.TimeValue; @@ -91,7 +91,7 @@ public Collection createComponents( final IndexNameExpressionResolver expressionResolver, final Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationDeciders allocationDeciders + AllocationService allocationService ) { if (enabled == false) { extender.set(null); diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index bf43c4a88d91..5a6a13d5fd3f 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -7,12 +7,12 @@ */ import org.elasticsearch.gradle.internal.test.RestIntegTestTask -import org.elasticsearch.gradle.internal.test.rest.InternalJavaRestTestPlugin +import org.elasticsearch.gradle.internal.test.rest.LegacyJavaRestTestPlugin import org.elasticsearch.gradle.internal.test.InternalClusterTestPlugin -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' -apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-java-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' apply plugin: 'elasticsearch.publish' @@ -34,6 +34,8 @@ configurations { } dependencies { + api project(":libs:elasticsearch-ssl-config") + // network stack api "io.netty:netty-buffer:${versions.netty}" api "io.netty:netty-codec:${versions.netty}" @@ -71,7 +73,7 @@ TaskProvider pooledInternalClusterTest = tasks.register("pooledInternalClu TaskProvider pooledJavaRestTest = tasks.register("pooledJavaRestTest", RestIntegTestTask) { SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); - SourceSet javaRestTestSourceSet = sourceSets.getByName(InternalJavaRestTestPlugin.SOURCE_SET_NAME) + SourceSet javaRestTestSourceSet = sourceSets.getByName(LegacyJavaRestTestPlugin.SOURCE_SET_NAME) setTestClassesDirs(javaRestTestSourceSet.getOutput().getClassesDirs()) setClasspath(javaRestTestSourceSet.getRuntimeClasspath()) @@ -91,7 +93,8 @@ tasks.named("thirdPartyAudit").configure { 'com.aayushatharva.brotli4j.Brotli4jLoader', 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Status', 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Wrapper', - 'com.aayushatharva.brotli4j.encoder.Encoders', + 'com.aayushatharva.brotli4j.encoder.BrotliEncoderChannel', + 'com.aayushatharva.brotli4j.encoder.Encoder', 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4PipeliningIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4PipeliningIT.java index eaecc13a15d8..b381e0ea8bfb 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4PipeliningIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4PipeliningIT.java @@ -13,6 +13,7 @@ import org.elasticsearch.ESNetty4IntegTestCase; import org.elasticsearch.common.transport.TransportAddress; 
+import org.elasticsearch.core.Strings; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -53,7 +54,7 @@ public void testThatNettyHttpServerSupportsPipelining() throws Exception { private void assertOpaqueIdsInOrder(Collection opaqueIds) { // check if opaque ids are monotonically increasing int i = 0; - String msg = formatted("Expected list of opaque ids to be monotonically increasing, got [%s]", opaqueIds); + String msg = Strings.format("Expected list of opaque ids to be monotonically increasing, got [%s]", opaqueIds); for (String opaqueId : opaqueIds) { assertThat(msg, opaqueId, is(String.valueOf(i++))); } diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/Netty4TransportMultiPortIntegrationIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/Netty4TransportMultiPortIntegrationIT.java index 4566d2a56d95..3eccb8386066 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/Netty4TransportMultiPortIntegrationIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/transport/netty4/Netty4TransportMultiPortIntegrationIT.java @@ -14,13 +14,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.core.Strings; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.junit.annotations.Network; import org.elasticsearch.transport.TransportInfo; -import java.util.Locale; - import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasKey; @@ -39,7 +38,7 @@ public class Netty4TransportMultiPortIntegrationIT extends ESNetty4IntegTestCase protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { if (randomPort == -1) { randomPort = randomIntBetween(49152, 65525); - randomPortRange = String.format(Locale.ROOT, "%s-%s", randomPort, randomPort + 10); + randomPortRange = Strings.format("%s-%s", randomPort, randomPort + 10); } Settings.Builder builder = Settings.builder() .put(super.nodeSettings(nodeOrdinal, otherSettings)) diff --git a/modules/transport-netty4/src/javaRestTest/java/org/elasticsearch/rest/Netty4HeadBodyIsEmptyIT.java b/modules/transport-netty4/src/javaRestTest/java/org/elasticsearch/rest/Netty4HeadBodyIsEmptyIT.java index b1e2a6d7fdd1..e705c4303687 100644 --- a/modules/transport-netty4/src/javaRestTest/java/org/elasticsearch/rest/Netty4HeadBodyIsEmptyIT.java +++ b/modules/transport-netty4/src/javaRestTest/java/org/elasticsearch/rest/Netty4HeadBodyIsEmptyIT.java @@ -25,6 +25,7 @@ import static org.elasticsearch.rest.RestStatus.OK; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.nullValue; public class Netty4HeadBodyIsEmptyIT extends ESRestTestCase { public void testHeadRoot() throws IOException { @@ -59,8 +60,8 @@ public void testDocumentExists() throws IOException { public void testIndexExists() throws IOException { createTestDoc(); - headTestCase("/test", emptyMap(), greaterThan(0)); - headTestCase("/test", singletonMap("pretty", "true"), greaterThan(0)); + 
headTestCase("/test", emptyMap(), nullValue(Integer.class)); + headTestCase("/test", singletonMap("pretty", "true"), nullValue(Integer.class)); } public void testAliasExists() throws IOException { @@ -177,7 +178,8 @@ private void headTestCase( request.setOptions(expectWarnings(expectedWarnings)); Response response = client().performRequest(request); assertEquals(expectedStatusCode, response.getStatusLine().getStatusCode()); - assertThat(Integer.valueOf(response.getHeader("Content-Length")), matcher); + final var contentLength = response.getHeader("Content-Length"); + assertThat(contentLength == null ? null : Integer.valueOf(contentLength), matcher); assertNull("HEAD requests shouldn't have a response body but " + url + " did", response.getEntity()); } diff --git a/modules/transport-netty4/src/main/java/module-info.java b/modules/transport-netty4/src/main/java/module-info.java index 92217b419c66..5f94b97be782 100644 --- a/modules/transport-netty4/src/main/java/module-info.java +++ b/modules/transport-netty4/src/main/java/module-info.java @@ -10,6 +10,7 @@ requires jdk.net; requires org.elasticsearch.base; requires org.elasticsearch.server; + requires org.elasticsearch.sslconfig; requires org.elasticsearch.xcontent; requires org.apache.logging.log4j; requires org.apache.lucene.core; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java index 61c98788f17f..f5a32a0ec768 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java @@ -216,7 +216,8 @@ private void doWrite(ChannelHandlerContext ctx, Netty4ChunkedHttpResponse readyR combiner.add((Future) first); currentChunkedWrite = new ChunkedWrite(combiner, promise, readyResponse); if (enqueueWrite(ctx, readyResponse, first)) { - // we were able to write out the first chunk directly, try writing out subsequent chunks until the channel becomes unwritable + // We were able to write out the first chunk directly, try writing out subsequent chunks until the channel becomes unwritable. + // NB "writable" means there's space in the downstream ChannelOutboundBuffer, we aren't trying to saturate the physical channel. while (ctx.channel().isWritable()) { if (writeChunk(ctx, combiner, readyResponse.body())) { finishChunkedWrite(); @@ -280,6 +281,7 @@ private boolean doFlush(ChannelHandlerContext ctx) throws IOException { return false; } while (channel.isWritable()) { + // NB "writable" means there's space in the downstream ChannelOutboundBuffer, we aren't trying to saturate the physical channel. 
WriteOperation currentWrite = queuedWrites.poll(); if (currentWrite == null) { doWriteQueued(ctx); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index b2550d6ec42a..e741772eedb5 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -24,7 +24,10 @@ import io.netty.handler.codec.http.HttpContentDecompressor; import io.netty.handler.codec.http.HttpObjectAggregator; import io.netty.handler.codec.http.HttpRequestDecoder; +import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http.HttpResponseEncoder; +import io.netty.handler.codec.http.HttpUtil; +import io.netty.handler.ssl.SslHandler; import io.netty.handler.timeout.ReadTimeoutException; import io.netty.handler.timeout.ReadTimeoutHandler; import io.netty.util.AttributeKey; @@ -32,6 +35,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.network.CloseableChannel; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -41,23 +45,29 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Nullable; import org.elasticsearch.http.AbstractHttpServerTransport; import org.elasticsearch.http.HttpChannel; import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.HttpReadTimeoutException; import org.elasticsearch.http.HttpServerChannel; +import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.tracing.Tracer; +import org.elasticsearch.transport.netty4.AcceptChannelHandler; import org.elasticsearch.transport.netty4.NetUtils; import org.elasticsearch.transport.netty4.Netty4Utils; import org.elasticsearch.transport.netty4.Netty4WriteThrottlingHandler; import org.elasticsearch.transport.netty4.NettyAllocator; import org.elasticsearch.transport.netty4.NettyByteBufSizer; +import org.elasticsearch.transport.netty4.SSLExceptionHelper; import org.elasticsearch.transport.netty4.SharedGroupFactory; +import org.elasticsearch.transport.netty4.TLSConfig; import org.elasticsearch.xcontent.NamedXContentRegistry; import java.net.InetSocketAddress; import java.util.concurrent.TimeUnit; +import java.util.function.BiPredicate; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH; @@ -131,6 +141,8 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport { private final SharedGroupFactory sharedGroupFactory; private final RecvByteBufAllocator recvByteBufAllocator; + private final TLSConfig tlsConfig; + private final AcceptChannelHandler.AcceptPredicate acceptChannelPredicate; private final int readTimeoutMillis; private final int maxCompositeBufferComponents; @@ -146,7 +158,10 @@ public Netty4HttpServerTransport( Dispatcher dispatcher, ClusterSettings clusterSettings, SharedGroupFactory sharedGroupFactory, - Tracer tracer + Tracer tracer, + 
TLSConfig tlsConfig, + @Nullable AcceptChannelHandler.AcceptPredicate acceptChannelPredicate + ) { super( settings, @@ -161,6 +176,8 @@ public Netty4HttpServerTransport( Netty4Utils.setAvailableProcessors(EsExecutors.allocatedProcessors(settings)); NettyAllocator.logAllocatorDescriptionIfNeeded(); this.sharedGroupFactory = sharedGroupFactory; + this.tlsConfig = tlsConfig; + this.acceptChannelPredicate = acceptChannelPredicate; this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings); @@ -252,6 +269,9 @@ protected void doStart() { serverBootstrap.childOption(ChannelOption.SO_REUSEADDR, reuseAddress); bindServer(); + if (acceptChannelPredicate != null) { + acceptChannelPredicate.setBoundAddress(boundAddress()); + } success = true; } finally { if (success == false) { @@ -279,7 +299,23 @@ protected void stopInternal() { @Override public void onException(HttpChannel channel, Exception cause) { - if (cause instanceof ReadTimeoutException) { + if (lifecycle.started() == false) { + return; + } + + if (SSLExceptionHelper.isNotSslRecordException(cause)) { + logger.warn("received plaintext http traffic on an https channel, closing connection {}", channel); + CloseableChannel.closeChannel(channel); + } else if (SSLExceptionHelper.isCloseDuringHandshakeException(cause)) { + logger.debug("connection {} closed during ssl handshake", channel); + CloseableChannel.closeChannel(channel); + } else if (SSLExceptionHelper.isInsufficientBufferRemainingException(cause)) { + logger.debug("connection {} closed abruptly", channel); + CloseableChannel.closeChannel(channel); + } else if (SSLExceptionHelper.isReceivedCertificateUnknownException(cause)) { + logger.warn("http client did not trust this server's certificate, closing connection {}", channel); + CloseableChannel.closeChannel(channel); + } else if (cause instanceof ReadTimeoutException) { super.onException(channel, new HttpReadTimeoutException(readTimeoutMillis, cause)); } else { super.onException(channel, cause); @@ -287,7 +323,7 @@ public void onException(HttpChannel channel, Exception cause) { } public ChannelHandler configureServerChannelHandler() { - return new HttpChannelHandler(this, handlingSettings); + return new HttpChannelHandler(this, handlingSettings, tlsConfig, acceptChannelPredicate); } static final AttributeKey HTTP_CHANNEL_KEY = AttributeKey.newInstance("es-http-channel"); @@ -297,16 +333,35 @@ protected static class HttpChannelHandler extends ChannelInitializer { private final Netty4HttpServerTransport transport; private final HttpHandlingSettings handlingSettings; - - protected HttpChannelHandler(final Netty4HttpServerTransport transport, final HttpHandlingSettings handlingSettings) { + private final TLSConfig tlsConfig; + private final BiPredicate acceptChannelPredicate; + + protected HttpChannelHandler( + final Netty4HttpServerTransport transport, + final HttpHandlingSettings handlingSettings, + final TLSConfig tlsConfig, + @Nullable final BiPredicate acceptChannelPredicate + ) { this.transport = transport; this.handlingSettings = handlingSettings; + this.tlsConfig = tlsConfig; + this.acceptChannelPredicate = acceptChannelPredicate; } @Override protected void initChannel(Channel ch) throws Exception { Netty4HttpChannel nettyHttpChannel = new Netty4HttpChannel(ch); ch.attr(HTTP_CHANNEL_KEY).set(nettyHttpChannel); + if (acceptChannelPredicate != null) { + ch.pipeline() + .addLast( + "accept_channel_handler", + new AcceptChannelHandler(acceptChannelPredicate, HttpServerTransport.HTTP_PROFILE_NAME) + ); + } + if 
(tlsConfig.isTLSEnabled()) { + ch.pipeline().addLast("ssl", new SslHandler(tlsConfig.createServerSSLEngine())); + } ch.pipeline() .addLast("chunked_writer", new Netty4WriteThrottlingHandler(transport.getThreadPool().getThreadContext())) .addLast("byte_buf_sizer", NettyByteBufSizer.INSTANCE); @@ -324,7 +379,18 @@ protected void initChannel(Channel ch) throws Exception { ch.pipeline() .addLast("decoder", decoder) .addLast("decoder_compress", new HttpContentDecompressor()) - .addLast("encoder", new HttpResponseEncoder()) + .addLast("encoder", new HttpResponseEncoder() { + @Override + protected boolean isContentAlwaysEmpty(HttpResponse msg) { + // non-chunked responses (Netty4HttpResponse extends Netty's DefaultFullHttpResponse) with chunked transfer + // encoding are only sent by us in response to HEAD requests and must always have an empty body + if (msg instanceof Netty4HttpResponse netty4HttpResponse && HttpUtil.isTransferEncodingChunked(msg)) { + assert netty4HttpResponse.content().isReadable() == false; + return true; + } + return super.isContentAlwaysEmpty(msg); + } + }) .addLast("aggregator", aggregator); if (handlingSettings.compression()) { ch.pipeline().addLast("encoder_compress", new HttpContentCompressor(handlingSettings.compressionLevel())); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/AcceptChannelHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/AcceptChannelHandler.java new file mode 100644 index 000000000000..993d70c6e89f --- /dev/null +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/AcceptChannelHandler.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.transport.netty4; + +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.ipfilter.AbstractRemoteAddressFilter; + +import org.elasticsearch.common.transport.BoundTransportAddress; + +import java.net.InetSocketAddress; +import java.util.function.BiPredicate; + +@ChannelHandler.Sharable +public class AcceptChannelHandler extends AbstractRemoteAddressFilter { + + private final BiPredicate predicate; + private final String profile; + + public AcceptChannelHandler(final BiPredicate predicate, final String profile) { + this.predicate = predicate; + this.profile = profile; + } + + @Override + protected boolean accept(final ChannelHandlerContext ctx, final InetSocketAddress remoteAddress) throws Exception { + return predicate.test(profile, remoteAddress); + } + + public interface AcceptPredicate extends BiPredicate { + + void setBoundAddress(BoundTransportAddress boundHttpTransportAddress); + } +} diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Plugin.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Plugin.java index e39994c05a44..e5d6042382b7 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Plugin.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Plugin.java @@ -9,7 +9,7 @@ package org.elasticsearch.transport.netty4; import org.apache.lucene.util.SetOnce; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; @@ -78,7 +78,7 @@ public Map> getTransports( NETTY_TRANSPORT_NAME, () -> new Netty4Transport( settings, - Version.CURRENT, + TransportVersion.CURRENT, threadPool, networkService, pageCacheRecycler, @@ -112,7 +112,9 @@ public Map> getHttpTransports( dispatcher, clusterSettings, getSharedGroupFactory(settings), - tracer + tracer, + TLSConfig.noTLS(), + null ) ); } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index 563dc8c77ac3..2913cc21d133 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -26,7 +26,7 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; @@ -42,6 +42,7 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.TransportSettings; @@ -98,7 +99,7 @@ public class Netty4Transport extends TcpTransport { public Netty4Transport( Settings settings, - Version version, + TransportVersion version, ThreadPool threadPool, NetworkService networkService, 
PageCacheRecycler pageCacheRecycler, @@ -267,7 +268,7 @@ protected ChannelHandler getServerChannelInitializer(String name) { return new ServerChannelInitializer(name); } - protected ChannelHandler getClientChannelInitializer(DiscoveryNode node) { + protected ChannelHandler getClientChannelInitializer(DiscoveryNode node, ConnectionProfile connectionProfile) { return new ClientChannelInitializer(); } @@ -275,10 +276,10 @@ protected ChannelHandler getClientChannelInitializer(DiscoveryNode node) { static final AttributeKey SERVER_CHANNEL_KEY = AttributeKey.newInstance("es-server-channel"); @Override - protected Netty4TcpChannel initiateChannel(DiscoveryNode node) throws IOException { + protected Netty4TcpChannel initiateChannel(DiscoveryNode node, ConnectionProfile connectionProfile) throws IOException { InetSocketAddress address = node.getAddress().address(); Bootstrap bootstrapWithHandler = clientBootstrap.clone(); - bootstrapWithHandler.handler(getClientChannelInitializer(node)); + bootstrapWithHandler.handler(getClientChannelInitializer(node, connectionProfile)); bootstrapWithHandler.remoteAddress(address); ChannelFuture connectFuture = bootstrapWithHandler.connect(); @@ -288,7 +289,13 @@ protected Netty4TcpChannel initiateChannel(DiscoveryNode node) throws IOExceptio throw new IOException(connectFuture.cause()); } - Netty4TcpChannel nettyChannel = new Netty4TcpChannel(channel, false, "default", rstOnClose, connectFuture); + Netty4TcpChannel nettyChannel = new Netty4TcpChannel( + channel, + false, + connectionProfile.getTransportProfile(), + rstOnClose, + connectFuture + ); channel.attr(CHANNEL_KEY).set(nettyChannel); return nettyChannel; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NettyAllocator.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NettyAllocator.java index 1a73c0d1a648..ea999ce0f471 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NettyAllocator.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NettyAllocator.java @@ -98,7 +98,6 @@ public class NettyAllocator { maxOrder = 5; } } - int tinyCacheSize = PooledByteBufAllocator.defaultTinyCacheSize(); int smallCacheSize = PooledByteBufAllocator.defaultSmallCacheSize(); int normalCacheSize = PooledByteBufAllocator.defaultNormalCacheSize(); boolean useCacheForAllThreads = PooledByteBufAllocator.defaultUseCacheForAllThreads(); @@ -108,7 +107,6 @@ public class NettyAllocator { 0, pageSize, maxOrder, - tinyCacheSize, smallCacheSize, normalCacheSize, useCacheForAllThreads diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/core/security/transport/SSLExceptionHelper.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/SSLExceptionHelper.java similarity index 87% rename from x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/core/security/transport/SSLExceptionHelper.java rename to modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/SSLExceptionHelper.java index 9bc63be18e04..19294c193605 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/core/security/transport/SSLExceptionHelper.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/SSLExceptionHelper.java @@ -1,10 +1,12 @@ /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. 
Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. */ -package org.elasticsearch.xpack.core.security.transport; + +package org.elasticsearch.transport.netty4; import io.netty.handler.codec.DecoderException; import io.netty.handler.ssl.NotSslRecordException; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/TLSConfig.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/TLSConfig.java new file mode 100644 index 000000000000..b78fcd1a5a9b --- /dev/null +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/TLSConfig.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.transport.netty4; + +import org.elasticsearch.common.ssl.SslConfiguration; + +import javax.net.ssl.SSLEngine; + +public record TLSConfig(SslConfiguration sslConfiguration, EngineProvider engineProvider) { + + public boolean isTLSEnabled() { + return sslConfiguration != null; + } + + public SSLEngine createServerSSLEngine() { + assert isTLSEnabled(); + SSLEngine sslEngine = engineProvider.create(sslConfiguration, null, -1); + sslEngine.setUseClientMode(false); + return sslEngine; + } + + public static TLSConfig noTLS() { + return new TLSConfig(null, null); + } + + @FunctionalInterface + public interface EngineProvider { + + SSLEngine create(SslConfiguration configuration, String host, int port); + } +} diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4BadRequestTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4BadRequestTests.java index 08721c68a34c..4b0757dd5144 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4BadRequestTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4BadRequestTests.java @@ -16,8 +16,6 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.util.MockPageCacheRecycler; -import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpTransportSettings; @@ -30,6 +28,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.netty4.SharedGroupFactory; +import org.elasticsearch.transport.netty4.TLSConfig; import org.junit.After; import org.junit.Before; @@ -45,13 +44,11 @@ public class Netty4BadRequestTests extends ESTestCase { private NetworkService networkService; - private PageCacheRecycler recycler; private ThreadPool threadPool; @Before public void setup() throws Exception { networkService = new NetworkService(Collections.emptyList()); - recycler = new MockPageCacheRecycler(Settings.EMPTY); threadPool = 
new TestThreadPool("test"); } @@ -88,7 +85,9 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, dispatcher, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), new SharedGroupFactory(Settings.EMPTY), - Tracer.NOOP + Tracer.NOOP, + TLSConfig.noTLS(), + null ) ) { httpServerTransport.start(); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java index eb510c50159a..2524be154414 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java @@ -21,15 +21,14 @@ import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpClientCodec; import io.netty.handler.codec.http.HttpContentDecompressor; import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpMethod; import io.netty.handler.codec.http.HttpObject; import io.netty.handler.codec.http.HttpObjectAggregator; import io.netty.handler.codec.http.HttpRequest; -import io.netty.handler.codec.http.HttpRequestEncoder; import io.netty.handler.codec.http.HttpResponse; -import io.netty.handler.codec.http.HttpResponseDecoder; import io.netty.handler.codec.http.HttpVersion; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -175,8 +174,7 @@ private static class CountDownLatchHandler extends ChannelInitializer() { diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java index 0813fd5afc99..42573ea4fe3f 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.netty4.SharedGroupFactory; +import org.elasticsearch.transport.netty4.TLSConfig; import org.junit.After; import org.junit.Before; @@ -104,7 +105,9 @@ class CustomNettyHttpServerTransport extends Netty4HttpServerTransport { new NullDispatcher(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), new SharedGroupFactory(settings), - Tracer.NOOP + Tracer.NOOP, + TLSConfig.noTLS(), + null ); } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java index 087f277c819e..8fd764299cfe 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java @@ -40,6 +40,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; import 
org.elasticsearch.common.settings.ClusterSettings; @@ -47,8 +48,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.MockPageCacheRecycler; -import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.TimeValue; import org.elasticsearch.http.AbstractHttpServerTransportTestCase; @@ -57,6 +56,7 @@ import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.http.NullDispatcher; +import org.elasticsearch.rest.ChunkedRestResponseBody; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; @@ -66,6 +66,8 @@ import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.netty4.NettyAllocator; import org.elasticsearch.transport.netty4.SharedGroupFactory; +import org.elasticsearch.transport.netty4.TLSConfig; +import org.elasticsearch.xcontent.ToXContent; import org.junit.After; import org.junit.Before; @@ -94,14 +96,12 @@ public class Netty4HttpServerTransportTests extends AbstractHttpServerTransportT private NetworkService networkService; private ThreadPool threadPool; - private PageCacheRecycler recycler; private ClusterSettings clusterSettings; @Before public void setup() throws Exception { networkService = new NetworkService(Collections.emptyList()); threadPool = new TestThreadPool("test"); - recycler = new MockPageCacheRecycler(Settings.EMPTY); clusterSettings = randomClusterSettings(); } @@ -112,7 +112,6 @@ public void shutdown() throws Exception { } threadPool = null; networkService = null; - recycler = null; clusterSettings = null; } @@ -176,7 +175,9 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, dispatcher, clusterSettings, new SharedGroupFactory(settings), - Tracer.NOOP + Tracer.NOOP, + TLSConfig.noTLS(), + null ) ) { transport.start(); @@ -225,7 +226,9 @@ public void testBindUnavailableAddress() { new NullDispatcher(), clusterSettings, new SharedGroupFactory(Settings.EMPTY), - Tracer.NOOP + Tracer.NOOP, + TLSConfig.noTLS(), + null ) ) { transport.start(); @@ -243,7 +246,9 @@ public void testBindUnavailableAddress() { new NullDispatcher(), clusterSettings, new SharedGroupFactory(settings), - Tracer.NOOP + Tracer.NOOP, + TLSConfig.noTLS(), + null ) ) { BindHttpException bindHttpException = expectThrows(BindHttpException.class, otherTransport::start); @@ -295,7 +300,9 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th dispatcher, clusterSettings, new SharedGroupFactory(settings), - Tracer.NOOP + Tracer.NOOP, + TLSConfig.noTLS(), + null ) ) { transport.start(); @@ -363,11 +370,13 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th dispatcher, clusterSettings, new SharedGroupFactory(Settings.EMPTY), - Tracer.NOOP + Tracer.NOOP, + TLSConfig.noTLS(), + null ) { @Override public ChannelHandler configureServerChannelHandler() { - return new HttpChannelHandler(this, handlingSettings) { + return new HttpChannelHandler(this, handlingSettings, TLSConfig.noTLS(), null) { @Override protected void initChannel(Channel ch) throws Exception { super.initChannel(ch); @@ -460,7 +469,9 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th dispatcher, randomClusterSettings(), new 
SharedGroupFactory(settings), - Tracer.NOOP + Tracer.NOOP, + TLSConfig.noTLS(), + null ) ) { transport.start(); @@ -530,7 +541,9 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th dispatcher, randomClusterSettings(), new SharedGroupFactory(settings), - Tracer.NOOP + Tracer.NOOP, + TLSConfig.noTLS(), + null ) ) { transport.start(); @@ -560,6 +573,69 @@ protected void initChannel(SocketChannel ch) { } } + public void testHeadRequestToChunkedApi() throws InterruptedException { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + try { + channel.sendResponse( + new RestResponse( + OK, + ChunkedRestResponseBody.fromXContent( + ignored -> Iterators.single( + (builder, params) -> { throw new AssertionError("should not be called for HEAD REQUEST"); } + ), + ToXContent.EMPTY_PARAMS, + channel + ) + ) + ); + } catch (IOException e) { + throw new AssertionError(e); + } + } + + @Override + public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) { + throw new AssertionError(); + } + + }; + + final Settings settings = createSettings(); + try ( + Netty4HttpServerTransport transport = new Netty4HttpServerTransport( + settings, + networkService, + threadPool, + xContentRegistry(), + dispatcher, + clusterSettings, + new SharedGroupFactory(settings), + Tracer.NOOP, + TLSConfig.noTLS(), + null + ) + ) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + try (Netty4HttpClient client = new Netty4HttpClient()) { + final String url = "/some-head-endpoint"; + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.HEAD, url); + + final FullHttpResponse response = client.send(remoteAddress.address(), request); + try { + assertThat(response.status(), equalTo(HttpResponseStatus.OK)); + assertFalse(response.content().isReadable()); + } finally { + response.release(); + } + } + } + } + private Settings createSettings() { return createBuilderWithPort().build(); } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java index 2229e46e522a..28db025015b6 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java @@ -8,7 +8,7 @@ package org.elasticsearch.transport.netty4; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; @@ -56,7 +56,7 @@ public void startThreadPool() { PageCacheRecycler recycler = new MockPageCacheRecycler(Settings.EMPTY); nettyTransport = new Netty4Transport( settings, - Version.CURRENT, + TransportVersion.CURRENT, threadPool, networkService, recycler, diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/NettyTransportMultiPortTests.java 
b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/NettyTransportMultiPortTests.java index 9648f19ac8c9..1d35a6ac0146 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/NettyTransportMultiPortTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/NettyTransportMultiPortTests.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.transport.netty4; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; @@ -110,7 +110,7 @@ private TcpTransport startTransport(Settings settings, ThreadPool threadPool) { PageCacheRecycler recycler = new MockPageCacheRecycler(Settings.EMPTY); TcpTransport transport = new Netty4Transport( settings, - Version.CURRENT, + TransportVersion.CURRENT, threadPool, new NetworkService(Collections.emptyList()), recycler, diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java index 91a31f19f0e3..157e81e55af3 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.transport.netty4; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -24,10 +25,13 @@ import org.elasticsearch.transport.AbstractSimpleTransportTestCase; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.ConnectionProfile; +import org.elasticsearch.transport.RemoteClusterPortSettings; import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.TestProfiles; import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportSettings; import java.io.IOException; import java.net.InetAddress; @@ -38,6 +42,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -45,7 +50,7 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase { @Override - protected Transport build(Settings settings, final Version version, ClusterSettings clusterSettings, boolean doHandshake) { + protected Transport build(Settings settings, TransportVersion version, ClusterSettings clusterSettings, boolean doHandshake) { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); return new Netty4Transport( settings, @@ -63,12 +68,12 @@ public void executeHandshake( DiscoveryNode node, TcpChannel channel, ConnectionProfile profile, - ActionListener listener + ActionListener listener ) { if (doHandshake) { super.executeHandshake(node, channel, profile, listener); } else { - 
listener.onResponse(version.minimumCompatibilityVersion()); + listener.onResponse(version.calculateMinimumCompatVersion()); } } }; @@ -119,6 +124,50 @@ public void testDefaultKeepAliveSettings() throws IOException { } } + public void testTransportProfile() { + final String transportProfile = randomFrom( + TransportSettings.DEFAULT_PROFILE, + RemoteClusterPortSettings.REMOTE_CLUSTER_PROFILE, + randomAlphaOfLengthBetween(5, 12) + ); + final ConnectionProfile connectionProfile = ConnectionProfile.resolveConnectionProfile( + new ConnectionProfile.Builder().setTransportProfile(transportProfile) + .addConnections( + 1, + TransportRequestOptions.Type.BULK, + TransportRequestOptions.Type.PING, + TransportRequestOptions.Type.RECOVERY, + TransportRequestOptions.Type.REG, + TransportRequestOptions.Type.STATE + ) + .build(), + TestProfiles.LIGHT_PROFILE + ); + + try ( + MockTransportService serviceC = buildService("TS_C", Version.CURRENT, Settings.EMPTY); + MockTransportService serviceD = buildService("TS_D", Version.CURRENT, Settings.EMPTY) + ) { + + try (Transport.Connection connection = openConnection(serviceC, serviceD.getLocalDiscoNode(), connectionProfile)) { + assertThat(connection, instanceOf(StubbableTransport.WrappedConnection.class)); + Transport.Connection conn = ((StubbableTransport.WrappedConnection) connection).getConnection(); + assertThat(conn, instanceOf(TcpTransport.NodeChannels.class)); + TcpTransport.NodeChannels nodeChannels = (TcpTransport.NodeChannels) conn; + for (TcpChannel channel : nodeChannels.getChannels()) { + assertFalse(channel.isServerChannel()); + assertThat(channel.getProfile(), equalTo(transportProfile)); + } + + assertThat(serviceD.getOriginalTransport(), instanceOf(TcpTransport.class)); + for (TcpChannel channel : getAcceptedChannels((TcpTransport) serviceD.getOriginalTransport())) { + assertTrue(channel.isServerChannel()); + assertThat(channel.getProfile(), equalTo(TransportSettings.DEFAULT_PROFILE)); + } + } + } + } + private void checkDefaultKeepAliveOptions(TcpChannel channel) throws IOException { assertThat(channel, instanceOf(Netty4TcpChannel.class)); Netty4TcpChannel nettyChannel = (Netty4TcpChannel) channel; diff --git a/plugins/analysis-icu/build.gradle b/plugins/analysis-icu/build.gradle index b10ca2b6dd49..1c7db6d040be 100644 --- a/plugins/analysis-icu/build.gradle +++ b/plugins/analysis-icu/build.gradle @@ -7,8 +7,8 @@ import org.elasticsearch.gradle.internal.info.BuildParams * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { diff --git a/plugins/analysis-kuromoji/build.gradle b/plugins/analysis-kuromoji/build.gradle index 0a9cc629e7a4..a91e34018179 100644 --- a/plugins/analysis-kuromoji/build.gradle +++ b/plugins/analysis-kuromoji/build.gradle @@ -5,8 +5,8 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' esplugin { description 'The Japanese (kuromoji) Analysis plugin integrates Lucene kuromoji analysis module into elasticsearch.' 
diff --git a/plugins/analysis-nori/build.gradle b/plugins/analysis-nori/build.gradle index 4c545d4f72d4..51e93bf6cc2c 100644 --- a/plugins/analysis-nori/build.gradle +++ b/plugins/analysis-nori/build.gradle @@ -5,8 +5,8 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' esplugin { description 'The Korean (nori) Analysis plugin integrates Lucene nori analysis module into elasticsearch.' diff --git a/plugins/analysis-nori/src/test/java/org/elasticsearch/plugin/analysis/nori/NoriAnalysisTests.java b/plugins/analysis-nori/src/test/java/org/elasticsearch/plugin/analysis/nori/NoriAnalysisTests.java index 994ebffb9af6..c9fbef0d20ad 100644 --- a/plugins/analysis-nori/src/test/java/org/elasticsearch/plugin/analysis/nori/NoriAnalysisTests.java +++ b/plugins/analysis-nori/src/test/java/org/elasticsearch/plugin/analysis/nori/NoriAnalysisTests.java @@ -75,7 +75,7 @@ public void testNoriAnalyzer() throws Exception { public void testNoriAnalyzerUserDict() throws Exception { Settings settings = Settings.builder() .put("index.analysis.analyzer.my_analyzer.type", "nori") - .putList("index.analysis.analyzer.my_analyzer.user_dictionary_rules", "c++", "C샤프", "세종", "세종시 세종 시") + .putList("index.analysis.analyzer.my_analyzer.user_dictionary_rules", "c++", "C쁠쁠", "세종", "세종시 세종 시") .build(); TestAnalysis analysis = createTestAnalysis(settings); Analyzer analyzer = analysis.indexAnalyzers.get("my_analyzer"); @@ -108,7 +108,7 @@ public void testNoriAnalyzerInvalidUserDictOption() throws Exception { Settings settings = Settings.builder() .put("index.analysis.analyzer.my_analyzer.type", "nori") .put("index.analysis.analyzer.my_analyzer.user_dictionary", "user_dict.txt") - .putList("index.analysis.analyzer.my_analyzer.user_dictionary_rules", "c++", "C샤프", "세종", "세종시 세종 시") + .putList("index.analysis.analyzer.my_analyzer.user_dictionary_rules", "c++", "C쁠쁠", "세종", "세종시 세종 시") .build(); IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> createTestAnalysis(settings)); assertThat( diff --git a/plugins/analysis-nori/src/test/resources/org/elasticsearch/plugin/analysis/nori/user_dict.txt b/plugins/analysis-nori/src/test/resources/org/elasticsearch/plugin/analysis/nori/user_dict.txt index 63c1c3a1e224..b70fb43a0092 100644 --- a/plugins/analysis-nori/src/test/resources/org/elasticsearch/plugin/analysis/nori/user_dict.txt +++ b/plugins/analysis-nori/src/test/resources/org/elasticsearch/plugin/analysis/nori/user_dict.txt @@ -1,5 +1,5 @@ # Additional nouns c++ -C샤프 +C쁠쁠 세종 -세종시 세종 시 \ No newline at end of file +세종시 세종 시 diff --git a/plugins/analysis-phonetic/build.gradle b/plugins/analysis-phonetic/build.gradle index 96149295b496..7646c0ee874d 100644 --- a/plugins/analysis-phonetic/build.gradle +++ b/plugins/analysis-phonetic/build.gradle @@ -5,8 +5,8 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' esplugin { description 'The Phonetic Analysis plugin integrates phonetic token filter analysis with elasticsearch.' 
diff --git a/plugins/analysis-smartcn/build.gradle b/plugins/analysis-smartcn/build.gradle index dd577a550e8f..c75c5c304048 100644 --- a/plugins/analysis-smartcn/build.gradle +++ b/plugins/analysis-smartcn/build.gradle @@ -5,8 +5,8 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' esplugin { description 'Smart Chinese Analysis plugin integrates Lucene Smart Chinese analysis module into elasticsearch.' diff --git a/plugins/analysis-stempel/build.gradle b/plugins/analysis-stempel/build.gradle index 363d923492e0..ca6d91df5cd1 100644 --- a/plugins/analysis-stempel/build.gradle +++ b/plugins/analysis-stempel/build.gradle @@ -5,8 +5,8 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' esplugin { description 'The Stempel (Polish) Analysis plugin integrates Lucene stempel (polish) analysis module into elasticsearch.' diff --git a/plugins/analysis-ukrainian/build.gradle b/plugins/analysis-ukrainian/build.gradle index 67aa47db8bbd..709b8831628b 100644 --- a/plugins/analysis-ukrainian/build.gradle +++ b/plugins/analysis-ukrainian/build.gradle @@ -5,8 +5,8 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' esplugin { description 'The Ukrainian Analysis plugin integrates the Lucene UkrainianMorfologikAnalyzer into elasticsearch.' diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index de47739d382b..3db7c21309b5 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -8,7 +8,7 @@ import org.elasticsearch.gradle.internal.info.BuildParams * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 1e48fa537e43..b21f6224c9fc 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -7,7 +7,7 @@ import org.elasticsearch.gradle.internal.info.BuildParams * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { diff --git a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle index e8afba63970c..c7c4ecba40fe 100644 --- a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle +++ b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle @@ -10,11 +10,11 @@ import org.apache.tools.ant.filters.ReplaceTokens import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.internal.test.AntFixture import org.elasticsearch.gradle.internal.test.RestIntegTestTask -import org.elasticsearch.gradle.internal.test.rest.InternalYamlRestTestPlugin +import org.elasticsearch.gradle.internal.test.rest.LegacyYamlRestTestPlugin import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE -apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' dependencies { yamlRestTestImplementation project(':plugins:discovery-ec2') @@ -62,7 +62,7 @@ tasks.named("yamlRestTest").configure { enabled = false } def yamlRestTestTask = tasks.register("yamlRestTest${action}", RestIntegTestTask) { dependsOn fixture SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); - SourceSet yamlRestTestSourceSet = sourceSets.getByName(InternalYamlRestTestPlugin.SOURCE_SET_NAME) + SourceSet yamlRestTestSourceSet = sourceSets.getByName(LegacyYamlRestTestPlugin.SOURCE_SET_NAME) testClassesDirs = yamlRestTestSourceSet.getOutput().getClassesDirs() classpath = yamlRestTestSourceSet.getRuntimeClasspath() } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/EC2RetriesTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/EC2RetriesTests.java index 806ae2ff9d82..b0a8242a7ca5 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/EC2RetriesTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/EC2RetriesTests.java @@ -14,7 +14,7 @@ import org.apache.http.HttpStatus; import org.apache.http.NameValuePair; import org.apache.http.client.utils.URLEncodedUtils; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; @@ -50,7 +50,7 @@ protected MockTransportService createTransportService() { Settings.EMPTY, new Netty4Transport( Settings.EMPTY, - Version.CURRENT, + TransportVersion.CURRENT, threadPool, networkService, PageCacheRecycler.NON_RECYCLING_INSTANCE, diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java index a06a76779516..87f9689f5a4d 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java @@ -17,7 +17,7 @@ import org.apache.http.HttpStatus; import org.apache.http.NameValuePair; import org.apache.http.client.utils.URLEncodedUtils; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.transport.TransportAddress; @@ -63,7 +63,7 @@ public class Ec2DiscoveryTests extends AbstractEC2MockAPITestCase { protected MockTransportService createTransportService() { final Transport transport = new Netty4Transport( Settings.EMPTY, - Version.CURRENT, + TransportVersion.CURRENT, threadPool, new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index cfd1609078de..75a8095ef412 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -1,4 +1,4 @@ -apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { @@ -9,8 +9,7 @@ esplugin { versions << [ 'google' : '1.41.1', 'google_api_client' : '1.33.1', - 'api_services_compute': 'v1-rev20220322-1.32.1', - 'google_oauth_client' : '1.33.0', + 'api_services_compute': 'v1-rev20220322-1.32.1' ] dependencies { diff --git a/plugins/discovery-gce/qa/gce/build.gradle b/plugins/discovery-gce/qa/gce/build.gradle index c2cd26c2e928..7eb6e85903ba 100644 --- a/plugins/discovery-gce/qa/gce/build.gradle +++ b/plugins/discovery-gce/qa/gce/build.gradle @@ -13,7 +13,7 @@ import org.elasticsearch.gradle.internal.test.AntFixture import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE -apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' final int gceNumberOfNodes = 3 diff --git a/plugins/examples/build.gradle b/plugins/examples/build.gradle index 263d73b128f0..e983292f7ab3 100644 --- a/plugins/examples/build.gradle +++ b/plugins/examples/build.gradle @@ -29,6 +29,14 @@ subprojects { } } } + if (gradle.includedBuilds.isEmpty()) { + maven { + url = "https://artifacts-snapshot.elastic.co/elasticsearch/${elasticsearchVersion}/maven" + mavenContent { + includeModule 'org.elasticsearch', 'elasticsearch' + } + } + } // Same for Lucene, add the snapshot repo based on the currently used Lucene version def luceneVersion = VersionProperties.getLucene() diff --git a/plugins/examples/custom-significance-heuristic/src/test/java/org/elasticsearch/example/customsigheuristic/SimpleHeuristicWireTests.java b/plugins/examples/custom-significance-heuristic/src/test/java/org/elasticsearch/example/customsigheuristic/SimpleHeuristicWireTests.java index 1c0d9601814d..d43374b3b5c6 100644 --- a/plugins/examples/custom-significance-heuristic/src/test/java/org/elasticsearch/example/customsigheuristic/SimpleHeuristicWireTests.java +++ b/plugins/examples/custom-significance-heuristic/src/test/java/org/elasticsearch/example/customsigheuristic/SimpleHeuristicWireTests.java @@ -43,4 +43,9 @@ protected Reader instanceReader() { protected SimpleHeuristic createTestInstance() { return new SimpleHeuristic(); } + + @Override + protected SimpleHeuristic mutateInstance(SimpleHeuristic instance) throws IOException { + return null; + } } diff --git a/plugins/examples/custom-suggester/src/main/java/org/elasticsearch/example/customsuggester/CustomSuggestionBuilder.java b/plugins/examples/custom-suggester/src/main/java/org/elasticsearch/example/customsuggester/CustomSuggestionBuilder.java index fa4056515a5b..827f3e837bde 100644 --- a/plugins/examples/custom-suggester/src/main/java/org/elasticsearch/example/customsuggester/CustomSuggestionBuilder.java +++ 
b/plugins/examples/custom-suggester/src/main/java/org/elasticsearch/example/customsuggester/CustomSuggestionBuilder.java @@ -8,17 +8,17 @@ package org.elasticsearch.example.customsuggester; -import org.elasticsearch.Version; -import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.BytesRefs; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.search.suggest.SuggestionBuilder; import org.elasticsearch.search.suggest.SuggestionSearchContext; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.HashMap; @@ -131,7 +131,7 @@ public SuggestionSearchContext.SuggestionContext build(SearchExecutionContext co } @Override - public Version getMinimalSupportedVersion() { - return Version.V_EMPTY; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.ZERO; } } diff --git a/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java b/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java index f599ddb63def..8691ab4e4f6e 100644 --- a/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java +++ b/plugins/examples/rescore/src/main/java/org/elasticsearch/example/rescore/ExampleRescoreBuilder.java @@ -13,24 +13,24 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.LeafFieldData; import org.elasticsearch.index.fielddata.LeafNumericFieldData; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; +import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.SearchExecutionContext; -import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.search.rescore.RescoreContext; import org.elasticsearch.search.rescore.Rescorer; import org.elasticsearch.search.rescore.RescorerBuilder; -import org.elasticsearch.Version; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Arrays; @@ -217,7 +217,7 @@ public Explanation explain(int topLevelDocId, IndexSearcher searcher, RescoreCon } @Override - public Version getMinimalSupportedVersion() { - return Version.V_EMPTY; + public TransportVersion getMinimalSupportedVersion() { + return 
TransportVersion.ZERO; } } diff --git a/plugins/examples/security-authorization-engine/src/main/java/org/elasticsearch/example/CustomAuthorizationEngine.java b/plugins/examples/security-authorization-engine/src/main/java/org/elasticsearch/example/CustomAuthorizationEngine.java index 998fe8894ee3..1eeb32ed1346 100644 --- a/plugins/examples/security-authorization-engine/src/main/java/org/elasticsearch/example/CustomAuthorizationEngine.java +++ b/plugins/examples/security-authorization-engine/src/main/java/org/elasticsearch/example/CustomAuthorizationEngine.java @@ -31,6 +31,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.function.Supplier; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; @@ -102,12 +103,30 @@ public void authorizeIndexAction(RequestInfo requestInfo, AuthorizationInfo auth } @Override - public void loadAuthorizedIndices(RequestInfo requestInfo, AuthorizationInfo authorizationInfo, - Map<String, IndexAbstraction> indicesLookup, ActionListener<Set<String>> listener) { + public void loadAuthorizedIndices( + RequestInfo requestInfo, + AuthorizationInfo authorizationInfo, + Map<String, IndexAbstraction> indicesLookup, + ActionListener<AuthorizedIndices> listener + ) { if (isSuperuser(requestInfo.getAuthentication().getEffectiveSubject().getUser())) { - listener.onResponse(indicesLookup.keySet()); + listener.onResponse(new AuthorizedIndices() { + public Supplier<Set<String>> all() { + return () -> indicesLookup.keySet(); + } + public boolean check(String name) { + return indicesLookup.containsKey(name); + } + }); } else { - listener.onResponse(Collections.emptySet()); + listener.onResponse(new AuthorizedIndices() { + public Supplier<Set<String>> all() { + return () -> Set.of(); + } + public boolean check(String name) { + return false; + } + }); } } diff --git a/plugins/examples/stable-analysis/build.gradle b/plugins/examples/stable-analysis/build.gradle index 69e44c46be9f..7686be80a2a1 100644 --- a/plugins/examples/stable-analysis/build.gradle +++ b/plugins/examples/stable-analysis/build.gradle @@ -10,18 +10,19 @@ esplugin { dependencies { //TODO transitive dependency off and plugin-api dependency?
- compileOnly "org.elasticsearch:elasticsearch-plugin-api:${elasticsearchVersion}" - compileOnly "org.elasticsearch:elasticsearch-plugin-analysis-api:${elasticsearchVersion}" + compileOnly "org.elasticsearch.plugin:elasticsearch-plugin-api:${elasticsearchVersion}" + compileOnly "org.elasticsearch.plugin:elasticsearch-plugin-analysis-api:${elasticsearchVersion}" compileOnly "org.apache.lucene:lucene-analysis-common:${luceneVersion}" //TODO for testing this also have to be declared - testImplementation "org.elasticsearch:elasticsearch-plugin-api:${elasticsearchVersion}" - testImplementation "org.elasticsearch:elasticsearch-plugin-analysis-api:${elasticsearchVersion}" + testImplementation "org.elasticsearch.plugin:elasticsearch-plugin-api:${elasticsearchVersion}" + testImplementation "org.elasticsearch.plugin:elasticsearch-plugin-analysis-api:${elasticsearchVersion}" testImplementation "org.apache.lucene:lucene-analysis-common:${luceneVersion}" testImplementation ('junit:junit:4.13.2'){ exclude group: 'org.hamcrest' } + testImplementation 'org.mockito:mockito-core:4.4.0' testImplementation 'org.hamcrest:hamcrest:2.2' } diff --git a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/CharacterSkippingTokenizerFactory.java b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/CharacterSkippingTokenizerFactory.java new file mode 100644 index 000000000000..570dc55c2538 --- /dev/null +++ b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/CharacterSkippingTokenizerFactory.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.example.analysis; + +import org.apache.lucene.analysis.Tokenizer; +import org.elasticsearch.example.analysis.lucene.CharSkippingTokenizer; +import org.elasticsearch.plugin.analysis.TokenizerFactory; +import org.elasticsearch.plugin.Inject; +import org.elasticsearch.plugin.NamedComponent; + +import java.util.List; + +@NamedComponent("example_tokenizer_factory") +public class CharacterSkippingTokenizerFactory implements TokenizerFactory { + private final List tokenizerListOfChars; + + @Inject + public CharacterSkippingTokenizerFactory(ExampleAnalysisSettings settings) { + this.tokenizerListOfChars = settings.singleCharsToSkipInTokenizer(); + } + + @Override + public Tokenizer create() { + return new CharSkippingTokenizer(tokenizerListOfChars); + } +} + diff --git a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/CustomAnalyzerFactory.java b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/CustomAnalyzerFactory.java new file mode 100644 index 000000000000..b546a1da8c6f --- /dev/null +++ b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/CustomAnalyzerFactory.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.example.analysis; + +import org.apache.lucene.analysis.Analyzer; +import org.elasticsearch.example.analysis.lucene.CharSkippingTokenizer; +import org.elasticsearch.example.analysis.lucene.ReplaceCharToNumber; +import org.elasticsearch.example.analysis.lucene.SkipStartingWithDigitTokenFilter; +import org.elasticsearch.plugin.analysis.AnalyzerFactory; +import org.elasticsearch.plugin.NamedComponent; +import org.elasticsearch.plugin.Inject; + +import java.util.List; + +@NamedComponent( "example_analyzer_factory") +public class CustomAnalyzerFactory implements AnalyzerFactory { + private final ExampleAnalysisSettings settings; + + @Inject + public CustomAnalyzerFactory(ExampleAnalysisSettings settings) { + this.settings = settings; + } + + @Override + public Analyzer create() { + return new CustomAnalyzer(settings); + } + + static class CustomAnalyzer extends Analyzer { + + private final ExampleAnalysisSettings settings; + + public CustomAnalyzer(ExampleAnalysisSettings settings) { + this.settings = settings; + } + + @Override + protected TokenStreamComponents createComponents(String fieldName) { + var tokenizerListOfChars = settings.singleCharsToSkipInTokenizer().isEmpty() ? List.of("_") : settings.singleCharsToSkipInTokenizer(); + var tokenizer = new CharSkippingTokenizer(tokenizerListOfChars); + + long tokenFilterNumber = settings.analyzerUseTokenListOfChars() ? settings.digitToSkipInTokenFilter() : -1; + var tokenFilter = new SkipStartingWithDigitTokenFilter(tokenizer, tokenFilterNumber); + return new TokenStreamComponents( + r -> tokenizer.setReader(new ReplaceCharToNumber(r, settings.oldCharToReplaceInCharFilter(), settings.newNumberToReplaceInCharFilter())), + tokenFilter + ); + } + } +} + diff --git a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/ExampleAnalysisSettings.java b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/ExampleAnalysisSettings.java new file mode 100644 index 000000000000..024b28e468da --- /dev/null +++ b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/ExampleAnalysisSettings.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.example.analysis; + +import org.elasticsearch.plugin.settings.AnalysisSettings; +import org.elasticsearch.plugin.settings.BooleanSetting; +import org.elasticsearch.plugin.settings.IntSetting; +import org.elasticsearch.plugin.settings.LongSetting; +import org.elasticsearch.plugin.settings.StringSetting; +import org.elasticsearch.plugin.settings.ListSetting; + +import java.util.List; + +/** + * This is a settings interface that will be injected into a plugin's constructors + * annotated with @Inject. + * The settings interface has to be annotated with @AnalysisSettings so that Elasticsearch can generate + * its dynamic implementation. + * Methods on the interface should be no-arg and have a return type.
+ * For types supported see the plugin-api/org.elasticsearch.plugin.api.settings classes + * @see CustomAnalyzerFactory an example injection of this interface + */ +@AnalysisSettings +public interface ExampleAnalysisSettings { + /* + * This method presents the use of String typed setting. + * see the ReplaceCharWithNumberCharFilterFactory + */ + @StringSetting(path = "old_char", defaultValue = " ") + String oldCharToReplaceInCharFilter(); + + /* + * This method presents the use of int typed setting. + * see the ReplaceCharWithNumberCharFilterFactory + */ + @IntSetting(path = "new_number", defaultValue = 0) + int newNumberToReplaceInCharFilter(); + + /* + * This method presents the use of long typed setting. + * see the ExampleTokenFilterFactory + */ + @LongSetting(path = "token_filter_number", defaultValue = 0L) + long digitToSkipInTokenFilter(); + + /* + * This method presents the use of boolean typed setting. + * see the ExampleAnalyzerFactory + */ + @BooleanSetting(path = "analyzerUseTokenListOfChars", defaultValue = true) + boolean analyzerUseTokenListOfChars(); + /* + * This method presents the use of boolean typed setting. + * see the ExampleTokenizerFactory + */ + @ListSetting(path = "tokenizer_list_of_chars") + List singleCharsToSkipInTokenizer(); +} diff --git a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/ExampleAnalyzerFactory.java b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/ExampleAnalyzerFactory.java deleted file mode 100644 index 053b8fffb451..000000000000 --- a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/ExampleAnalyzerFactory.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.example.analysis; - -import org.apache.lucene.analysis.Analyzer; -import org.elasticsearch.example.analysis.lucene.ReplaceHash; -import org.elasticsearch.example.analysis.lucene.Skip1TokenFilter; -import org.elasticsearch.example.analysis.lucene.UnderscoreTokenizer; -import org.elasticsearch.plugin.api.NamedComponent; - -@NamedComponent( "example_analyzer_factory") -public class ExampleAnalyzerFactory implements org.elasticsearch.plugin.analysis.api.AnalyzerFactory { - - @Override - //TODO guide lucene - public Analyzer create() { - return new CustomAnalyzer(); - } - - static class CustomAnalyzer extends Analyzer { - - @Override - protected TokenStreamComponents createComponents(String fieldName) { - var tokenizer = new UnderscoreTokenizer(); - var tokenFilter = new Skip1TokenFilter(tokenizer); - return new TokenStreamComponents(r -> tokenizer.setReader(new ReplaceHash(r)), tokenFilter); - } - } -} - diff --git a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/ExampleCharFilterFactory.java b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/ExampleCharFilterFactory.java deleted file mode 100644 index e918aa77b6cc..000000000000 --- a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/ExampleCharFilterFactory.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.example.analysis; - -import org.apache.lucene.util.SuppressForbidden; -import org.elasticsearch.example.analysis.lucene.ReplaceHash; -import org.elasticsearch.plugin.analysis.api.CharFilterFactory; -import org.elasticsearch.plugin.api.NamedComponent; - -import java.io.Reader; - -@NamedComponent( "example_char_filter") -public class ExampleCharFilterFactory implements CharFilterFactory { - @Override - public Reader create(Reader reader) { - return new ReplaceHash(reader); - } -} - - diff --git a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/ExampleTokenFilterFactory.java b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/ExampleTokenFilterFactory.java deleted file mode 100644 index 4f8e20455c36..000000000000 --- a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/ExampleTokenFilterFactory.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.example.analysis; - -import org.apache.lucene.analysis.TokenStream; -import org.elasticsearch.example.analysis.lucene.AppendTokenFilter; -import org.elasticsearch.example.analysis.lucene.Skip1TokenFilter; -import org.elasticsearch.plugin.analysis.api.AnalysisMode; -import org.elasticsearch.plugin.api.NamedComponent; - -@NamedComponent( "example_token_filter_factory") -public class ExampleTokenFilterFactory implements org.elasticsearch.plugin.analysis.api.TokenFilterFactory { - @Override - public TokenStream create(TokenStream tokenStream) { - return new Skip1TokenFilter(tokenStream); - } - - @Override - public TokenStream normalize(TokenStream tokenStream) { - return new AppendTokenFilter(tokenStream, "1"); - } - - @Override - public AnalysisMode getAnalysisMode() { - return org.elasticsearch.plugin.analysis.api.TokenFilterFactory.super.getAnalysisMode(); - } - -} - diff --git a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/ExampleTokenizerFactory.java b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/ExampleTokenizerFactory.java deleted file mode 100644 index dbd55fd9dfe5..000000000000 --- a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/ExampleTokenizerFactory.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.example.analysis; - -import org.apache.lucene.analysis.Tokenizer; -import org.elasticsearch.example.analysis.lucene.UnderscoreTokenizer; -import org.elasticsearch.plugin.analysis.api.TokenizerFactory; -import org.elasticsearch.plugin.api.NamedComponent; - -@NamedComponent( "example_tokenizer_factory") -public class ExampleTokenizerFactory implements TokenizerFactory { - @Override - public Tokenizer create() { - return new UnderscoreTokenizer(); - } - -} - diff --git a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/ReplacingCharFilterFactory.java b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/ReplacingCharFilterFactory.java new file mode 100644 index 000000000000..91b509353b6d --- /dev/null +++ b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/ReplacingCharFilterFactory.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.example.analysis; + +import org.elasticsearch.example.analysis.lucene.ReplaceCharToNumber; +import org.elasticsearch.plugin.NamedComponent; +import org.elasticsearch.plugin.Inject; +import org.elasticsearch.plugin.analysis.CharFilterFactory; + +import java.io.Reader; + +@NamedComponent( "example_char_filter") +public class ReplacingCharFilterFactory implements CharFilterFactory { + private final String oldChar; + private final int newNumber; + + @Inject + public ReplacingCharFilterFactory(ExampleAnalysisSettings settings) { + oldChar = settings.oldCharToReplaceInCharFilter(); + newNumber = settings.newNumberToReplaceInCharFilter(); + } + + @Override + public Reader create(Reader reader) { + return new ReplaceCharToNumber(reader, oldChar, newNumber); + } +} + + diff --git a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/SkippingTokenFilterFactory.java b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/SkippingTokenFilterFactory.java new file mode 100644 index 000000000000..75de12b5f6ca --- /dev/null +++ b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/SkippingTokenFilterFactory.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.example.analysis; + +import org.apache.lucene.analysis.TokenStream; +import org.elasticsearch.example.analysis.lucene.SkipStartingWithDigitTokenFilter; +import org.elasticsearch.plugin.analysis.AnalysisMode; +import org.elasticsearch.plugin.analysis.TokenFilterFactory; +import org.elasticsearch.plugin.NamedComponent; +import org.elasticsearch.plugin.Inject; + +@NamedComponent( "example_token_filter_factory") +public class SkippingTokenFilterFactory implements TokenFilterFactory { + private final long tokenFilterNumber; + + @Inject + public SkippingTokenFilterFactory(ExampleAnalysisSettings settings) { + this.tokenFilterNumber = settings.digitToSkipInTokenFilter(); + } + + @Override + public TokenStream create(TokenStream tokenStream) { + return new SkipStartingWithDigitTokenFilter(tokenStream, tokenFilterNumber); + } + + @Override + public TokenStream normalize(TokenStream tokenStream) { + return new SkipStartingWithDigitTokenFilter(tokenStream, tokenFilterNumber); + } + + @Override + public AnalysisMode getAnalysisMode() { + return TokenFilterFactory.super.getAnalysisMode(); + } + +} + diff --git a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/lucene/CharSkippingTokenizer.java b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/lucene/CharSkippingTokenizer.java new file mode 100644 index 000000000000..326ae870e5e9 --- /dev/null +++ b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/lucene/CharSkippingTokenizer.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.example.analysis.lucene; + +import org.apache.lucene.analysis.util.CharTokenizer; + +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +/* +A tokenizer that skips configured tokens. +configured tokens are passed as list of single char strings. + */ +public class CharSkippingTokenizer extends CharTokenizer { + + private final Set setOfChars; + + public CharSkippingTokenizer(List tokenizerListOfChars) { + this.setOfChars = tokenizerListOfChars.stream().map(s -> (int) s.charAt(0)).collect(Collectors.toSet()); + } + + @Override + protected boolean isTokenChar(int c) { + return setOfChars.contains(c) == false; + } +} diff --git a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/lucene/ReplaceCharToNumber.java b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/lucene/ReplaceCharToNumber.java new file mode 100644 index 000000000000..262748a129ff --- /dev/null +++ b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/lucene/ReplaceCharToNumber.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.example.analysis.lucene; + +import org.apache.lucene.analysis.charfilter.MappingCharFilter; +import org.apache.lucene.analysis.charfilter.NormalizeCharMap; + +import java.io.Reader; + +public class ReplaceCharToNumber extends MappingCharFilter { + + public ReplaceCharToNumber(Reader in, String oldChar, int newNumber) { + super(charMap(oldChar, newNumber), in); + } + + private static NormalizeCharMap charMap(String oldChar, int newNumber) { + NormalizeCharMap.Builder builder = new NormalizeCharMap.Builder(); + builder.add(oldChar, String.valueOf(newNumber)); + return builder.build(); + } +} diff --git a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/lucene/ReplaceHash.java b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/lucene/ReplaceHash.java deleted file mode 100644 index 062ce694b76f..000000000000 --- a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/lucene/ReplaceHash.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.example.analysis.lucene; - -import org.apache.lucene.analysis.charfilter.MappingCharFilter; -import org.apache.lucene.analysis.charfilter.NormalizeCharMap; - -import java.io.Reader; - -public class ReplaceHash extends MappingCharFilter { - - public ReplaceHash(Reader in) { - super(charMap(), in); - } - - private static NormalizeCharMap charMap() { - NormalizeCharMap.Builder builder = new NormalizeCharMap.Builder(); - builder.add("#", "3"); - return builder.build(); - } -} diff --git a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/lucene/Skip1TokenFilter.java b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/lucene/Skip1TokenFilter.java deleted file mode 100644 index 7e84e580dfea..000000000000 --- a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/lucene/Skip1TokenFilter.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.example.analysis.lucene; - -import org.apache.lucene.analysis.FilteringTokenFilter; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; - -import java.io.IOException; - -public class Skip1TokenFilter extends FilteringTokenFilter { - - private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); - - public Skip1TokenFilter(TokenStream in) { - super(in); - } - - @Override - protected boolean accept() throws IOException { - return termAtt.buffer()[0] != '1'; - } -} diff --git a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/lucene/SkipStartingWithDigitTokenFilter.java b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/lucene/SkipStartingWithDigitTokenFilter.java new file mode 100644 index 000000000000..0ddfa51af562 --- /dev/null +++ b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/lucene/SkipStartingWithDigitTokenFilter.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.example.analysis.lucene; + +import org.apache.lucene.analysis.FilteringTokenFilter; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; + +import java.io.IOException; + +public class SkipStartingWithDigitTokenFilter extends FilteringTokenFilter { + + private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); + private final long tokenFilterNumber; + + public SkipStartingWithDigitTokenFilter(TokenStream in, long tokenFilterNumber) { + super(in); + this.tokenFilterNumber = tokenFilterNumber; + } + + @Override + protected boolean accept() throws IOException { + return termAtt.buffer()[0] != (char) (tokenFilterNumber + '0'); + } +} diff --git a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/lucene/UnderscoreTokenizer.java b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/lucene/UnderscoreTokenizer.java deleted file mode 100644 index d3172cfb06aa..000000000000 --- a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/lucene/UnderscoreTokenizer.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.example.analysis.lucene; - -import org.apache.lucene.analysis.util.CharTokenizer; - -public class UnderscoreTokenizer extends CharTokenizer { - - @Override - protected boolean isTokenChar(int c) { - return c != '_'; - } -} diff --git a/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/package-info.java b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/package-info.java new file mode 100644 index 000000000000..5368b1f72d47 --- /dev/null +++ b/plugins/examples/stable-analysis/src/main/java/org/elasticsearch/example/analysis/package-info.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +/** + * Implementation notes to all components: + *
+ * <ul>
+ * <li>a @NamedComponent annotation with a name is required in order for the component to be found by Elasticsearch</li>
+ * <li>a constructor is annotated with @Inject and has a settings interface as an argument. See the javadoc for
+ * ExampleAnalysisSettings for more details</li>
+ * <li>a no-arg constructor is also possible</li>
+ * <li>methods from the stable analysis API are to be implemented with Apache Lucene</li>
+ * </ul>
+ */ +package org.elasticsearch.example.analysis; diff --git a/plugins/examples/stable-analysis/src/test/java/org/elasticsearch/example/analysis/ExampleCharFilterFactoryTests.java b/plugins/examples/stable-analysis/src/test/java/org/elasticsearch/example/analysis/ExampleCharFilterFactoryTests.java deleted file mode 100644 index 4a81e2c66c6c..000000000000 --- a/plugins/examples/stable-analysis/src/test/java/org/elasticsearch/example/analysis/ExampleCharFilterFactoryTests.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.example.analysis; - - -import org.elasticsearch.plugin.analysis.api.CharFilterFactory; -import org.junit.Test; - -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.equalTo; - -public class ExampleCharFilterFactoryTests { - @Test - public void exampleCharFilterIsAnnotatedWithName() { - CharFilterFactory charFilterFactory = new ExampleCharFilterFactory(); - assertThat(charFilterFactory.name(), equalTo("example_char_filter")); - } -} diff --git a/plugins/examples/stable-analysis/src/test/java/org/elasticsearch/example/analysis/ReplacingCharFilterFactoryTests.java b/plugins/examples/stable-analysis/src/test/java/org/elasticsearch/example/analysis/ReplacingCharFilterFactoryTests.java new file mode 100644 index 000000000000..9859e9a59585 --- /dev/null +++ b/plugins/examples/stable-analysis/src/test/java/org/elasticsearch/example/analysis/ReplacingCharFilterFactoryTests.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.example.analysis; + + +import org.elasticsearch.plugin.analysis.CharFilterFactory; +import org.junit.Test; +import org.mockito.Mockito; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; + +public class ReplacingCharFilterFactoryTests { + @Test + public void exampleCharFilterIsAnnotatedWithName() { + ExampleAnalysisSettings settings = Mockito.mock(ExampleAnalysisSettings.class); + CharFilterFactory charFilterFactory = new ReplacingCharFilterFactory(settings); + assertThat(charFilterFactory.name(), equalTo("example_char_filter")); + } +} diff --git a/plugins/examples/stable-analysis/src/yamlRestTest/resources/rest-api-spec/test/analysis/20_char_filter.yml b/plugins/examples/stable-analysis/src/yamlRestTest/resources/rest-api-spec/test/analysis/20_char_filter.yml index ed9489b06abb..c88cf3ca7aae 100644 --- a/plugins/examples/stable-analysis/src/yamlRestTest/resources/rest-api-spec/test/analysis/20_char_filter.yml +++ b/plugins/examples/stable-analysis/src/yamlRestTest/resources/rest-api-spec/test/analysis/20_char_filter.yml @@ -1,8 +1,7 @@ ## Smoke tests for char filters included in the analysis-common module -#todo this can be expanded with parameters once settings support is added --- -"Stable char_filter plugin. 
Replace # with 3": +"Stable char_filter plugin. Replace char with number": - do: indices.analyze: body: @@ -10,22 +9,65 @@ tokenizer: standard char_filter: - type: example_char_filter + old_char: "#" + new_number: 3 - length: { tokens: 1 } - match: { tokens.0.token: "t3st" } - do: indices.analyze: body: - text: t#st + text: t st explain: true tokenizer: standard char_filter: - type: example_char_filter - match: { detail.custom_analyzer: true } - length: { detail.charfilters.0.filtered_text: 1 } - - match: { detail.charfilters.0.filtered_text.0: "t3st" } + - match: { detail.charfilters.0.filtered_text.0: "t0st" } - length: { detail.tokenizer.tokens: 1 } - - match: { detail.tokenizer.tokens.0.token: "t3st" } + - match: { detail.tokenizer.tokens.0.token: "t0st" } - match: { detail.tokenizer.tokens.0.start_offset: 0 } - match: { detail.tokenizer.tokens.0.end_offset: 4 } - match: { detail.tokenizer.tokens.0.position: 0 } + +--- +"Index and search with stable plugin char filter": + - do: + indices.create: + index: test + body: + settings: + analysis: + char_filter: + my_char_filter: + type: example_char_filter + old_char: "#" + new_number: 4 + analyzer: + my_analyzer: + tokenizer: standard + char_filter: ["my_char_filter" + ] + mappings: + properties: + text: + type: text + analyzer: my_analyzer + - do: + index: + index: test + id: "1" + body: { "text": "t#st" } + - do: + indices.refresh: { } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + text: t4st + - match: { hits.total: 1 } diff --git a/plugins/examples/stable-analysis/src/yamlRestTest/resources/rest-api-spec/test/analysis/30_token_filter.yml b/plugins/examples/stable-analysis/src/yamlRestTest/resources/rest-api-spec/test/analysis/30_token_filter.yml index 818649ff14ae..3d09633fbad0 100644 --- a/plugins/examples/stable-analysis/src/yamlRestTest/resources/rest-api-spec/test/analysis/30_token_filter.yml +++ b/plugins/examples/stable-analysis/src/yamlRestTest/resources/rest-api-spec/test/analysis/30_token_filter.yml @@ -10,9 +10,11 @@ tokenizer: standard filter: - type: example_token_filter_factory - - length: { tokens: 2 } - - match: { tokens.0.token: "2test" } - - match: { tokens.1.token: "3test" } + token_filter_number: 2 + - length: { tokens: 3 } + - match: { tokens.0.token: "1test" } + - match: { tokens.1.token: "1test" } + - match: { tokens.2.token: "3test" } - do: indices.analyze: @@ -22,7 +24,49 @@ tokenizer: standard filter: - type: example_token_filter_factory + token_filter_number: 1 - match: { detail.custom_analyzer: true } - length: { detail.tokenfilters.0.tokens: 2 } - match: { detail.tokenfilters.0.tokens.0.token: "2test" } - match: { detail.tokenfilters.0.tokens.1.token: "3test" } + + +--- +"Index and search with stable plugin token filter": + - do: + indices.create: + index: test + body: + settings: + analysis: + filter: + my_token_filter: + type: example_token_filter_factory + token_filter_number: 1 + analyzer: + my_analyzer: + tokenizer: standard + filter: ["my_token_filter" + ] + mappings: + properties: + text: + type: text + analyzer: my_analyzer + - do: + index: + index: test + id: "1" + body: { "text": "1test 2test 1test 3test" } + - do: + indices.refresh: { } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + text: 2test + - match: { hits.total: 1 } diff --git a/plugins/examples/stable-analysis/src/yamlRestTest/resources/rest-api-spec/test/analysis/40_tokenizer.yml 
b/plugins/examples/stable-analysis/src/yamlRestTest/resources/rest-api-spec/test/analysis/40_tokenizer.yml index 0a76cbbae9af..7b15ccd3d4cd 100644 --- a/plugins/examples/stable-analysis/src/yamlRestTest/resources/rest-api-spec/test/analysis/40_tokenizer.yml +++ b/plugins/examples/stable-analysis/src/yamlRestTest/resources/rest-api-spec/test/analysis/40_tokenizer.yml @@ -6,8 +6,10 @@ - do: indices.analyze: body: - text: x_y_z - tokenizer: example_tokenizer_factory + text: x y_z + tokenizer: + type: example_tokenizer_factory + tokenizer_list_of_chars: ["_", " "] - length: { tokens: 3 } - match: { tokens.0.token: "x" } - match: { tokens.1.token: "y" } @@ -18,7 +20,9 @@ body: text: x_y_z explain: true - tokenizer: example_tokenizer_factory + tokenizer: + type: example_tokenizer_factory + tokenizer_list_of_chars: ["_", " "] - match: { detail.custom_analyzer: true } - length: { detail.tokenizer.tokens: 3 } - match: { detail.tokenizer.tokens.0.token: "x" } diff --git a/plugins/examples/stable-analysis/src/yamlRestTest/resources/rest-api-spec/test/analysis/50_analyzer.yml b/plugins/examples/stable-analysis/src/yamlRestTest/resources/rest-api-spec/test/analysis/50_analyzer.yml index 8a536ffda14d..46ee6b8e15f1 100644 --- a/plugins/examples/stable-analysis/src/yamlRestTest/resources/rest-api-spec/test/analysis/50_analyzer.yml +++ b/plugins/examples/stable-analysis/src/yamlRestTest/resources/rest-api-spec/test/analysis/50_analyzer.yml @@ -2,23 +2,62 @@ #todo this can be expanded with parameters once settings support is added --- -"Stable analyzer provider plugin. It combines a underscore tokenizer, token filter which skips 1 and char filter which replaces # with 3": +"Stable analyzer provider plugin. It combines an underscore tokenizer, token filter which skips 0 and char filter which replaces # with 3": +#analyze request only allows analyzer field to be string (cannot be a NameOrDefinition) +#tokenizes by _, replaces space with 0 and skips tokens starting with 0 - do: indices.analyze: body: - text: 1x_y_#z + text: 0x_y_ z analyzer: example_analyzer_factory - - length: { tokens: 2 } + - length: { tokens: 1 } - match: { tokens.0.token: "y" } - - match: { tokens.1.token: "3z" } - do: indices.analyze: body: - text: 1x_y_#z + text: 0x_y_ z explain: true analyzer: example_analyzer_factory - match: { detail.custom_analyzer: false } #?? why not custom?
- - length: { detail.analyzer.tokens: 2 } + - length: { detail.analyzer.tokens: 1 } - match: { detail.analyzer.tokens.0.token: "y" } - - match: { detail.analyzer.tokens.1.token: "3z" } + + +--- +"Index and search with stable plugin analyzer": + - do: + indices.create: + index: test + body: + settings: + analysis: + analyzer: + my_analyzer: + type: example_analyzer_factory + old_char: "#" + new_number: 3 + tokenizer_list_of_chars: [ "_", " " ] + token_filter_number: 2 + mappings: + properties: + text: + type: text + analyzer: my_analyzer + - do: + index: + index: test + id: "1" + body: { "text": "1x_y_#z" } + - do: + indices.refresh: { } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + text: 3z + - match: { hits.total: 1 } diff --git a/plugins/mapper-annotated-text/build.gradle b/plugins/mapper-annotated-text/build.gradle index 633cc3d3dcbb..275e7f5267e5 100644 --- a/plugins/mapper-annotated-text/build.gradle +++ b/plugins/mapper-annotated-text/build.gradle @@ -7,8 +7,8 @@ import org.elasticsearch.gradle.internal.info.BuildParams * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' esplugin { description 'The Mapper Annotated_text plugin adds support for text fields with markup used to inject annotation tokens into the index.' diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedPassageFormatter.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedPassageFormatter.java index 0ab917400c66..6a6e37883470 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedPassageFormatter.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedPassageFormatter.java @@ -19,6 +19,7 @@ import java.net.URLEncoder; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; /** @@ -120,11 +121,27 @@ public String toString() { static MarkupPassage mergeAnnotations(AnnotationToken[] annotations, Passage passage) { MarkupPassage markupPassage = new MarkupPassage(); + final Integer[] matches = new Integer[passage.getNumMatches()]; + for (int i = 0; i < matches.length; ++i) { + matches[i] = i; + } + Arrays.sort(matches, (l, r) -> { + int lStart = passage.getMatchStarts()[l]; + int lEnd = passage.getMatchEnds()[l]; + int rStart = passage.getMatchStarts()[r]; + int rEnd = passage.getMatchEnds()[r]; + if (lStart == rStart) { + return rEnd - lEnd; // longest match first + } else { + return lStart - rStart; + } + }); + // Add search hits first - they take precedence over any other markup - for (int i = 0; i < passage.getNumMatches(); i++) { - int start = passage.getMatchStarts()[i]; - int end = passage.getMatchEnds()[i]; - String searchTerm = passage.getMatchTerms()[i].utf8ToString(); + for (int matchId : matches) { + int start = passage.getMatchStarts()[matchId]; + int end = passage.getMatchEnds()[matchId]; + String searchTerm = passage.getMatchTerms()[matchId].utf8ToString(); Markup markup = new Markup(start, end, SEARCH_HIT_TYPE + "=" + URLEncoder.encode(searchTerm, StandardCharsets.UTF_8)); 
markupPassage.addUnlessOverlapping(markup); } diff --git a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextHighlighterTests.java b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextHighlighterTests.java index 24a5e34a6eb2..f73bb7cbacbd 100644 --- a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextHighlighterTests.java +++ b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextHighlighterTests.java @@ -18,6 +18,8 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.PhraseQuery; @@ -197,6 +199,16 @@ public void testAnnotatedTextOverlapsWithUnstructuredSearchTerms() throws Except assertHighlightOneDoc("text", markedUpInputs, query, Locale.ROOT, breakIterator, 0, expectedPassages); } + public void testAnnotatedTextHighlightQueryHasOverlappingTermAndAnnotation() throws Exception { + final String[] markedUpInputs = { "[Donald Trump](president) visited Singapore" }; + String[] expectedPassages = { "[Donald Trump](_hit_term=president&president) visited Singapore" }; + Query query = new BooleanQuery.Builder().add(new TermQuery(new Term("text", "donald")), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term("text", "president")), BooleanClause.Occur.SHOULD) + .build(); + BreakIterator breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR); + assertHighlightOneDoc("text", markedUpInputs, query, Locale.ROOT, breakIterator, 0, expectedPassages); + } + public void testAnnotatedTextMultiFieldWithBreakIterator() throws Exception { final String[] markedUpInputs = { "[Donald Trump](Donald+Trump) visited Singapore. Kim shook hands with Donald", diff --git a/plugins/mapper-murmur3/build.gradle b/plugins/mapper-murmur3/build.gradle index 6243871ef3bd..7e93e6d29539 100644 --- a/plugins/mapper-murmur3/build.gradle +++ b/plugins/mapper-murmur3/build.gradle @@ -7,8 +7,8 @@ import org.elasticsearch.gradle.internal.info.BuildParams * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.internal-yaml-rest-test' -apply plugin: 'elasticsearch.yaml-rest-compat-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' esplugin { description 'The Mapper Murmur3 plugin allows to compute hashes of a field\'s values at index-time and to store them in the index.' 
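Editor's note, not part of the patch: the AnnotatedPassageFormatter.mergeAnnotations change above reorders passage matches before emitting markup, visiting them by ascending start offset and, when two matches start at the same offset, taking the longer one first so the shorter overlapping hit is discarded by the subsequent overlap check. The following standalone Java sketch only illustrates that ordering; the Match record and sample offsets are hypothetical and do not appear in the patch.

import java.util.Arrays;
import java.util.Comparator;

public class MatchOrderingSketch {

    // Illustrative stand-in for a highlighter passage match: [start, end) plus the matched term.
    record Match(int start, int end, String term) {}

    public static void main(String[] args) {
        Match[] matches = {
            new Match(0, 6, "donald"),        // plain search-term hit
            new Match(0, 12, "donald trump"), // annotation starting at the same offset, but longer
            new Match(21, 30, "singapore")
        };

        // Sort by start offset ascending; on equal starts, longest match first.
        Arrays.sort(matches, Comparator.comparingInt(Match::start)
            .thenComparing(Comparator.comparingInt(Match::end).reversed()));

        // Expected order: [0,12) "donald trump", [0,6) "donald", [21,30) "singapore",
        // so the shorter hit at offset 0 is seen second and can be dropped as overlapping.
        for (Match m : matches) {
            System.out.println(m);
        }
    }
}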
diff --git a/plugins/mapper-size/src/yamlRestTest/java/org/elasticsearch/index/mapper/size/MapperSizeClientYamlTestSuiteIT.java b/plugins/mapper-size/src/yamlRestTest/java/org/elasticsearch/index/mapper/size/MapperSizeClientYamlTestSuiteIT.java index a1bef57849f2..e9730418307e 100644 --- a/plugins/mapper-size/src/yamlRestTest/java/org/elasticsearch/index/mapper/size/MapperSizeClientYamlTestSuiteIT.java +++ b/plugins/mapper-size/src/yamlRestTest/java/org/elasticsearch/index/mapper/size/MapperSizeClientYamlTestSuiteIT.java @@ -11,11 +11,16 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; public class MapperSizeClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local().plugin("mapper-size").build(); + public MapperSizeClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { super(testCandidate); } @@ -24,4 +29,9 @@ public MapperSizeClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate tes public static Iterable parameters() throws Exception { return createParameters(); } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } } diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 6cd9053f2d47..df8d3cdac1c4 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -15,8 +15,8 @@ import java.nio.file.Path import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE apply plugin: 'elasticsearch.test.fixtures' -apply plugin: 'elasticsearch.internal-java-rest-test' -apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-java-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' esplugin { description 'The HDFS repository plugin adds support for Hadoop Distributed File-System (HDFS) repositories.' 
@@ -47,7 +47,7 @@ dependencies { } runtimeOnly "org.apache.hadoop:hadoop-client-runtime:${versions.hadoop}" implementation "org.apache.hadoop:hadoop-hdfs:${versions.hadoop}" - api 'com.google.protobuf:protobuf-java:3.4.0' + api "com.google.protobuf:protobuf-java:${versions.protobuf}" api "commons-logging:commons-logging:${versions.commonslogging}" api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api 'commons-cli:commons-cli:1.2' @@ -304,8 +304,11 @@ tasks.named("thirdPartyAudit").configure { ignoreMissingClasses() ignoreViolations( // internal java api: sun.misc.Unsafe + 'com.google.protobuf.MessageSchema', 'com.google.protobuf.UnsafeUtil', 'com.google.protobuf.UnsafeUtil$1', + 'com.google.protobuf.UnsafeUtil$Android32MemoryAccessor', + 'com.google.protobuf.UnsafeUtil$Android64MemoryAccessor', 'com.google.protobuf.UnsafeUtil$JvmMemoryAccessor', 'com.google.protobuf.UnsafeUtil$MemoryAccessor', 'org.apache.hadoop.hdfs.server.datanode.checker.AbstractFuture$UnsafeAtomicHelper', diff --git a/plugins/repository-hdfs/src/javaRestTest/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java b/plugins/repository-hdfs/src/javaRestTest/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java index 2d291cdce88f..0c0329e3cdb0 100644 --- a/plugins/repository-hdfs/src/javaRestTest/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java +++ b/plugins/repository-hdfs/src/javaRestTest/java/org/elasticsearch/repositories/hdfs/HaHdfsFailoverTestSuiteIT.java @@ -22,6 +22,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; import org.elasticsearch.core.PathUtils; +import org.elasticsearch.core.Strings; import org.elasticsearch.test.rest.ESRestTestCase; import org.junit.Assert; @@ -100,7 +101,7 @@ public void testHAFailoverWithRepository() throws Exception { // Create repository { Request request = new Request("PUT", "/_snapshot/hdfs_ha_repo_read"); - request.setJsonEntity(formatted(""" + request.setJsonEntity(Strings.format(""" { "type": "hdfs", "settings": { diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java index 624f327be1a0..f93d65098c1b 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java @@ -18,7 +18,7 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.io.Streams; +import org.elasticsearch.core.Streams; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.test.ESTestCase; import org.hamcrest.CoreMatchers; diff --git a/plugins/store-smb/build.gradle b/plugins/store-smb/build.gradle index 91c7dcadf022..7147c7302733 100644 --- a/plugins/store-smb/build.gradle +++ b/plugins/store-smb/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' apply plugin: 'elasticsearch.internal-cluster-test' esplugin { diff --git a/qa/ccs-common-rest/build.gradle b/qa/ccs-common-rest/build.gradle index 5e0d323f117b..c881e1728362 100644 --- a/qa/ccs-common-rest/build.gradle +++ b/qa/ccs-common-rest/build.gradle @@ -1,20 +1,4 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - - -import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.internal.test.RestIntegTestTask -import org.elasticsearch.gradle.testclusters.DefaultTestClustersTask - - -apply plugin: 'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-rest-test' -apply plugin: 'elasticsearch.rest-resources' +apply plugin: 'elasticsearch.internal-yaml-rest-test' // This subproject copies a subset of the rest-api-spec rest tests and runs them in a slightly // modified way on two clusters connected via CCS. All operations except searches and a few other @@ -35,42 +19,13 @@ restResources { } } -def remoteCluster = testClusters.register("ccs-remote") { - numberOfNodes = 2 - setting 'node.roles', '[data,ingest,master]' -} - -def localCluster = testClusters.register("ccs-local") { - setting 'node.roles', '[data,ingest,master,remote_cluster_client]' - setting 'cluster.remote.connections_per_cluster', '1' - setting 'cluster.remote.remote_cluster.seeds', - { "\"${remoteCluster.get().getAllTransportPortURI().get(0)}\"" } -} - -testClusters.configureEach { - setting 'xpack.security.enabled', 'false' - requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") +dependencies { + clusterModules project(':x-pack:plugin:async-search') + clusterModules project(':modules:mapper-extras') + clusterModules project(':modules:aggregations') } -// the following task is needed to make sure the remote cluster is running before the local cluster -// gets configured with the remotes cluster seed -tasks.register('startRemoteCluster', DefaultTestClustersTask) { - useCluster remoteCluster - doLast { - clusters.each { c -> - print "Remote cluster transport uri for ccs configuration is: " - println c.getAllTransportPortURI().get(0) - } - } -} - -tasks.register("ccs-remote", RestIntegTestTask) { - mustRunAfter("precommit") - dependsOn startRemoteCluster - - useCluster remoteCluster - useCluster localCluster - +tasks.named("yamlRestTest") { systemProperty 'tests.rest.blacklist', [ 'search/150_rewrite_on_coordinator/Ensure that we fetch the document only once', // terms lookup query with index @@ -86,13 +41,5 @@ tasks.register("ccs-remote", RestIntegTestTask) { 'search.aggregation/70_adjacency_matrix/Terms lookup', // terms lookup by "index" doesn't seem to work correctly 'async_search/20-with-poin-in-time/Async search with point in time' // [indices] cannot be used with point in time ].join(',') - - - doFirst { - println "Remote cluster endpoints are: ${-> remoteCluster.get().allHttpSocketURI.join(",")}" - println "Local cluster endpoints are: ${-> localCluster.get().allHttpSocketURI.join(",")}" - nonInputProperties.systemProperty('tests.rest.cluster', remoteCluster.map(c -> c.allHttpSocketURI.join(","))) - 
nonInputProperties.systemProperty('tests.rest.search_cluster', localCluster.map(c -> c.allHttpSocketURI.join(","))) - } } diff --git a/qa/ccs-common-rest/src/test/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java similarity index 89% rename from qa/ccs-common-rest/src/test/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java rename to qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java index 5a4df29b6488..9df00025d21b 100644 --- a/qa/ccs-common-rest/src/test/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java @@ -23,6 +23,9 @@ import org.elasticsearch.client.RestClient; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Tuple; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; +import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.test.rest.yaml.section.ClientYamlTestSection; import org.elasticsearch.test.rest.yaml.section.DoSection; @@ -32,6 +35,9 @@ import org.elasticsearch.test.rest.yaml.section.MatchAssertion; import org.junit.AfterClass; import org.junit.Before; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; import java.io.IOException; import java.util.ArrayList; @@ -58,9 +64,34 @@ public class CcsCommonYamlTestSuiteIT extends ESClientYamlSuiteTestCase { private static RestClient adminSearchClient; private static List clusterHosts; private static ClientYamlTestClient searchYamlTestClient; - // the remote cluster is the one we write index operations etc... 
to private static final String REMOTE_CLUSTER_NAME = "remote_cluster"; + + private static LocalClusterConfigProvider commonClusterConfig = cluster -> cluster.module("x-pack-async-search") + .module("aggregations") + .module("mapper-extras") + .setting("xpack.security.enabled", "false") + .feature(FeatureFlag.TIME_SERIES_MODE); + + private static ElasticsearchCluster remoteCluster = ElasticsearchCluster.local() + .name(REMOTE_CLUSTER_NAME) + .nodes(2) + .setting("node.roles", "[data,ingest,master]") + .apply(commonClusterConfig) + .build(); + + private static ElasticsearchCluster localCluster = ElasticsearchCluster.local() + .name("local_cluster") + .setting("node.roles", "[data,ingest,master,remote_cluster_client]") + .setting("cluster.remote.remote_cluster.seeds", () -> "\"" + remoteCluster.getTransportEndpoint(0) + "\"") + .setting("cluster.remote.connections_per_cluster", "1") + .apply(commonClusterConfig) + .build(); + + @ClassRule + // Use a RuleChain to ensure that remote cluster is started before local cluster + public static TestRule clusterRule = RuleChain.outerRule(remoteCluster).around(localCluster); + // the CCS api calls that we run against the "search" cluster in this test setup private static final Set CCS_APIS = Set.of( "search", @@ -75,6 +106,11 @@ public class CcsCommonYamlTestSuiteIT extends ESClientYamlSuiteTestCase { "async_search.delete" ); + @Override + protected String getTestRestCluster() { + return remoteCluster.getHttpAddresses(); + } + /** * initialize the search client and an additional administration client and check for an established connection */ @@ -84,9 +120,7 @@ public void initSearchClient() throws IOException { assert adminSearchClient == null; assert clusterHosts == null; - final String cluster = System.getProperty("tests.rest.search_cluster"); - assertNotNull("[tests.rest.search_cluster] is not configured", cluster); - String[] stringUrls = cluster.split(","); + String[] stringUrls = localCluster.getHttpAddresses().split(","); List hosts = new ArrayList<>(stringUrls.length); for (String stringUrl : stringUrls) { int portSeparator = stringUrl.lastIndexOf(':'); diff --git a/qa/ccs-unavailable-clusters/build.gradle b/qa/ccs-unavailable-clusters/build.gradle index 1b866385ecba..3b958bf9c8b9 100644 --- a/qa/ccs-unavailable-clusters/build.gradle +++ b/qa/ccs-unavailable-clusters/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.legacy-java-rest-test' testClusters.matching { it.name == "javaRestTest" }.configureEach { setting 'xpack.security.enabled', 'true' diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle index a3af45b43363..b6f181809e0e 100644 --- a/qa/full-cluster-restart/build.gradle +++ b/qa/full-cluster-restart/build.gradle @@ -6,64 +6,20 @@ * Side Public License, v 1. 
*/ - -import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask -apply plugin: 'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-rest-test' -apply plugin: 'elasticsearch.internal-test-artifact' +apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.internal-test-artifact-base' apply plugin: 'elasticsearch.bwc-test' -BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> - def baseCluster = testClusters.register(baseName) { - if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { - // When testing older versions we have to first upgrade to 7.last - versions = [bwcVersion.toString(), BuildParams.bwcVersions.minimumWireCompatibleVersion.toString(), project.version] - } else { - versions = [bwcVersion.toString(), project.version] - } - numberOfNodes = 2 - // some tests rely on the translog not being flushed - setting 'indices.memory.shard_inactive_time', '60m' - setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - setting 'xpack.security.enabled', 'false' - requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") - } - - tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { - useCluster baseCluster - mustRunAfter("precommit") - doFirst { - delete("${buildDir}/cluster/shared/repo/${baseName}") - } - - systemProperty 'tests.is_old_cluster', 'true' - } - - tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) { - useCluster baseCluster - dependsOn "${baseName}#oldClusterTest" - doFirst { - baseCluster.get().goToNextVersion() - if (bwcVersion.before(BuildParams.bwcVersions.minimumWireCompatibleVersion)) { - // When doing a full cluster restart of older versions we actually have to upgrade twice. First to 7.last, then to the current version. - baseCluster.get().goToNextVersion() - } - } - systemProperty 'tests.is_old_cluster', 'false' - } - - String oldVersion = bwcVersion.toString().minus("-SNAPSHOT") - tasks.matching { it.name.startsWith(baseName) && it.name.endsWith("ClusterTest") }.configureEach { - it.systemProperty 'tests.old_cluster_version', oldVersion - it.systemProperty 'tests.path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - it.nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) - it.nonInputProperties.systemProperty('tests.clustername', baseName) - } +testArtifacts { + registerTestArtifactFromSourceSet(sourceSets.javaRestTest) +} - tasks.register(bwcTaskName(bwcVersion)) { - dependsOn tasks.named("${baseName}#upgradedClusterTest") +BuildParams.bwcVersions.withIndexCompatible { bwcVersion, baseName -> + tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { + usesBwcDistribution(bwcVersion) + systemProperty("tests.old_cluster_version", bwcVersion) } } diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java new file mode 100644 index 000000000000..3f9a007e6bf4 --- /dev/null +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -0,0 +1,1902 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.annotations.Name; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.settings.RestClusterGetSettingsResponse; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.MetadataIndexStateService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.DateUtils; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.core.Booleans; +import org.elasticsearch.core.CheckedFunction; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; +import org.elasticsearch.test.NotEqualMessageBuilder; +import org.elasticsearch.test.XContentTestUtils; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; +import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.ObjectPath; +import org.elasticsearch.transport.Compression; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.json.JsonXContent; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TemporaryFolder; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Base64; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.IntStream; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static java.util.stream.Collectors.toList; +import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_VERSION; +import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; +import static org.elasticsearch.test.MapMatcher.assertMap; +import static org.elasticsearch.test.MapMatcher.matchesMap; +import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_COMPRESS; +import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static 
org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +/** + * Tests to run before and after a full cluster restart. This is run twice, + * one with {@code tests.is_old_cluster} set to {@code true} against a cluster + * of an older version. The cluster is shutdown and a cluster of the new + * version is started with the same data directories and then this is rerun + * with {@code tests.is_old_cluster} set to {@code false}. + */ +public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCase { + + private static TemporaryFolder repoDirectory = new TemporaryFolder(); + + protected static LocalClusterConfigProvider clusterConfig = c -> {}; + + private static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .version(getOldClusterTestVersion()) + .nodes(2) + .setting("path.repo", () -> repoDirectory.getRoot().getPath()) + .setting("xpack.security.enabled", "false") + // some tests rely on the translog not being flushed + .setting("indices.memory.shard_inactive_time", "60m") + .apply(() -> clusterConfig) + .feature(FeatureFlag.TIME_SERIES_MODE) + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(repoDirectory).around(cluster); + + private String index; + + public FullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + + @Override + protected ElasticsearchCluster getUpgradeCluster() { + return cluster; + } + + @Before + public void setIndex() { + index = getRootTestName(); + } + + public void testSearch() throws Exception { + int count; + if (isRunningAgainstOldCluster()) { + XContentBuilder mappingsAndSettings = jsonBuilder(); + mappingsAndSettings.startObject(); + { + mappingsAndSettings.startObject("settings"); + mappingsAndSettings.field("number_of_shards", 1); + mappingsAndSettings.field("number_of_replicas", 0); + mappingsAndSettings.endObject(); + } + { + mappingsAndSettings.startObject("mappings"); + mappingsAndSettings.startObject("properties"); + { + mappingsAndSettings.startObject("string"); + mappingsAndSettings.field("type", "text"); + mappingsAndSettings.endObject(); + } + { + mappingsAndSettings.startObject("dots_in_field_names"); + mappingsAndSettings.field("type", "text"); + mappingsAndSettings.endObject(); + } + { + mappingsAndSettings.startObject("binary"); + mappingsAndSettings.field("type", "binary"); + mappingsAndSettings.field("store", "true"); + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + + Request createIndex = new Request("PUT", "/" + index); + createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + client().performRequest(createIndex); + + count = randomIntBetween(2000, 3000); + byte[] randomByteArray = new byte[16]; + random().nextBytes(randomByteArray); + indexRandomDocuments( + count, + true, + true, + randomBoolean(), + i -> JsonXContent.contentBuilder() + .startObject() + .field("string", randomAlphaOfLength(10)) + .field("int", randomInt(100)) + .field("float", randomFloat()) + // be sure to create a "proper" boolean (True, False) for the first document so that automapping is correct + .field("bool", i > 0 && randomBoolean()) + .field("field.with.dots", 
randomAlphaOfLength(10)) + .field("binary", Base64.getEncoder().encodeToString(randomByteArray)) + .endObject() + ); + refreshAllIndices(); + } else { + count = countOfIndexedRandomDocuments(); + } + + ensureGreenLongWait(index); + assertBasicSearchWorks(count); + assertAllSearchWorks(count); + assertBasicAggregationWorks(); + assertRealtimeGetWorks(); + assertStoredBinaryFields(count); + } + + public void testNewReplicas() throws Exception { + if (isRunningAgainstOldCluster()) { + XContentBuilder mappingsAndSettings = jsonBuilder(); + mappingsAndSettings.startObject(); + { + mappingsAndSettings.startObject("settings"); + mappingsAndSettings.field("number_of_shards", 1); + mappingsAndSettings.field("number_of_replicas", 0); + mappingsAndSettings.endObject(); + } + { + mappingsAndSettings.startObject("mappings"); + mappingsAndSettings.startObject("properties"); + { + mappingsAndSettings.startObject("field"); + mappingsAndSettings.field("type", "text"); + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + + Request createIndex = new Request("PUT", "/" + index); + createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + client().performRequest(createIndex); + + int numDocs = randomIntBetween(2000, 3000); + indexRandomDocuments( + numDocs, + true, + false, + randomBoolean(), + i -> JsonXContent.contentBuilder().startObject().field("field", "value").endObject() + ); + logger.info("Refreshing [{}]", index); + client().performRequest(new Request("POST", "/" + index + "/_refresh")); + } else { + // The test runs with two nodes so this should still go green. + final int numReplicas = 1; + final long startTime = System.currentTimeMillis(); + logger.debug("--> creating [{}] replicas for index [{}]", numReplicas, index); + Request setNumberOfReplicas = new Request("PUT", "/" + index + "/_settings"); + setNumberOfReplicas.setJsonEntity("{ \"index\": { \"number_of_replicas\" : " + numReplicas + " }}"); + client().performRequest(setNumberOfReplicas); + + ensureGreenLongWait(index); + + logger.debug("--> index [{}] is green, took [{}] ms", index, (System.currentTimeMillis() - startTime)); + Map recoverRsp = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_recovery"))); + logger.debug("--> recovery status:\n{}", recoverRsp); + + Set counts = new HashSet<>(); + for (String node : dataNodes(index, client())) { + Request search = new Request("GET", "/" + index + "/_search"); + search.addParameter("preference", "_only_nodes:" + node); + Map responseBody = entityAsMap(client().performRequest(search)); + assertNoFailures(responseBody); + int hits = extractTotalHits(responseBody); + counts.add(hits); + } + assertEquals("All nodes should have a consistent number of documents", 1, counts.size()); + } + } + + public void testSearchTimeSeriesMode() throws Exception { + assumeTrue("indexing time series indices changed in 8.2.0", getOldClusterVersion().onOrAfter(Version.V_8_2_0)); + int numDocs; + if (isRunningAgainstOldCluster()) { + numDocs = createTimeSeriesModeIndex(1); + } else { + numDocs = countOfIndexedRandomDocuments(); + } + assertCountAll(numDocs); + Request request = new Request("GET", "/" + index + "/_search"); + XContentBuilder body = jsonBuilder().startObject(); + body.field("size", 0); + body.startObject("aggs").startObject("check").startObject("scripted_metric"); + { + body.field("init_script", "state.timeSeries = new HashSet()"); + body.field("map_script", 
"state.timeSeries.add(doc['dim'].value)"); + body.field("combine_script", "return state.timeSeries"); + StringBuilder reduceScript = new StringBuilder(); + reduceScript.append("Set timeSeries = new TreeSet();"); + reduceScript.append("for (s in states) {"); + reduceScript.append(" for (ts in s) {"); + reduceScript.append(" boolean newTs = timeSeries.add(ts);"); + reduceScript.append(" if (false == newTs) {"); + reduceScript.append(" throw new IllegalArgumentException(ts + ' appeared in two shards');"); + reduceScript.append(" }"); + reduceScript.append(" }"); + reduceScript.append("}"); + reduceScript.append("return timeSeries;"); + body.field("reduce_script", reduceScript.toString()); + } + body.endObject().endObject().endObject(); + body.endObject(); + request.setJsonEntity(Strings.toString(body)); + Map response = entityAsMap(client().performRequest(request)); + assertMap( + response, + matchesMap().extraOk() + .entry("hits", matchesMap().extraOk().entry("total", Map.of("value", numDocs, "relation", "eq"))) + .entry("aggregations", Map.of("check", Map.of("value", IntStream.range(0, 10).mapToObj(i -> "dim" + i).collect(toList())))) + ); + } + + public void testNewReplicasTimeSeriesMode() throws Exception { + assumeTrue("indexing time series indices changed in 8.2.0", getOldClusterVersion().onOrAfter(Version.V_8_2_0)); + if (isRunningAgainstOldCluster()) { + createTimeSeriesModeIndex(0); + } else { + // The test runs with two nodes so this should still go green. + final int numReplicas = 1; + final long startTime = System.currentTimeMillis(); + logger.debug("--> creating [{}] replicas for index [{}]", numReplicas, index); + Request setNumberOfReplicas = new Request("PUT", "/" + index + "/_settings"); + setNumberOfReplicas.setJsonEntity("{ \"index\": { \"number_of_replicas\" : " + numReplicas + " }}"); + client().performRequest(setNumberOfReplicas); + + ensureGreenLongWait(index); + + logger.debug("--> index [{}] is green, took [{}] ms", index, (System.currentTimeMillis() - startTime)); + Map recoverRsp = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_recovery"))); + logger.debug("--> recovery status:\n{}", recoverRsp); + + Set counts = new HashSet<>(); + for (String node : dataNodes(index, client())) { + Request search = new Request("GET", "/" + index + "/_search"); + search.addParameter("preference", "_only_nodes:" + node); + Map responseBody = entityAsMap(client().performRequest(search)); + assertNoFailures(responseBody); + int hits = extractTotalHits(responseBody); + counts.add(hits); + } + assertEquals("All nodes should have a consistent number of documents", 1, counts.size()); + } + } + + private int createTimeSeriesModeIndex(int replicas) throws IOException { + XContentBuilder mappingsAndSettings = jsonBuilder(); + mappingsAndSettings.startObject(); + { + mappingsAndSettings.startObject("settings"); + mappingsAndSettings.field("number_of_shards", 1); + mappingsAndSettings.field("number_of_replicas", replicas); + mappingsAndSettings.field("mode", "time_series"); + mappingsAndSettings.field("routing_path", "dim"); + mappingsAndSettings.field("time_series.start_time", 1L); + mappingsAndSettings.field("time_series.end_time", DateUtils.MAX_MILLIS_BEFORE_9999 - 1); + mappingsAndSettings.endObject(); + } + { + mappingsAndSettings.startObject("mappings"); + mappingsAndSettings.startObject("properties"); + { + mappingsAndSettings.startObject("@timestamp").field("type", "date").endObject(); + mappingsAndSettings.startObject("dim").field("type", 
"keyword").field("time_series_dimension", true).endObject(); + } + mappingsAndSettings.endObject(); + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + + Request createIndex = new Request("PUT", "/" + index); + createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + client().performRequest(createIndex); + + int numDocs = randomIntBetween(2000, 3000); + long basetime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2021-01-01T00:00:00Z"); + indexRandomDocuments( + numDocs, + true, + true, + false, + i -> JsonXContent.contentBuilder() + .startObject() + .field("@timestamp", basetime + TimeUnit.MINUTES.toMillis(i)) + .field("dim", "dim" + (i % 10)) + .endObject() + ); + logger.info("Refreshing [{}]", index); + client().performRequest(new Request("POST", "/" + index + "/_refresh")); + return numDocs; + } + + public void testClusterState() throws Exception { + if (isRunningAgainstOldCluster()) { + XContentBuilder mappingsAndSettings = jsonBuilder(); + mappingsAndSettings.startObject(); + mappingsAndSettings.field("index_patterns", index); + mappingsAndSettings.field("order", "1000"); + { + mappingsAndSettings.startObject("settings"); + mappingsAndSettings.field("number_of_shards", 1); + mappingsAndSettings.field("number_of_replicas", 0); + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + Request createTemplate = new Request("PUT", "/_template/template_1"); + createTemplate.setJsonEntity(Strings.toString(mappingsAndSettings)); + createTemplate.setOptions(expectWarnings(RestPutIndexTemplateAction.DEPRECATION_WARNING)); + client().performRequest(createTemplate); + client().performRequest(new Request("PUT", "/" + index)); + } + + // verifying if we can still read some properties from cluster state api: + Map clusterState = entityAsMap(client().performRequest(new Request("GET", "/_cluster/state"))); + + // Check some global properties: + String numberOfShards = (String) XContentMapValues.extractValue( + "metadata.templates.template_1.settings.index.number_of_shards", + clusterState + ); + assertEquals("1", numberOfShards); + String numberOfReplicas = (String) XContentMapValues.extractValue( + "metadata.templates.template_1.settings.index.number_of_replicas", + clusterState + ); + assertEquals("0", numberOfReplicas); + + // Check some index properties: + numberOfShards = (String) XContentMapValues.extractValue( + "metadata.indices." + index + ".settings.index.number_of_shards", + clusterState + ); + assertEquals("1", numberOfShards); + numberOfReplicas = (String) XContentMapValues.extractValue( + "metadata.indices." + index + ".settings.index.number_of_replicas", + clusterState + ); + assertEquals("0", numberOfReplicas); + Version version = Version.fromId( + Integer.valueOf( + (String) XContentMapValues.extractValue("metadata.indices." 
+ index + ".settings.index.version.created", clusterState) + ) + ); + assertEquals(getOldClusterVersion(), version); + + } + + public void testShrink() throws IOException { + String shrunkenIndex = index + "_shrunk"; + int numDocs; + if (isRunningAgainstOldCluster()) { + XContentBuilder mappingsAndSettings = jsonBuilder(); + mappingsAndSettings.startObject(); + { + mappingsAndSettings.startObject("mappings"); + { + mappingsAndSettings.startObject("properties"); + { + mappingsAndSettings.startObject("field"); + { + mappingsAndSettings.field("type", "text"); + } + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + + mappingsAndSettings.startObject("settings"); + { + mappingsAndSettings.field("index.number_of_shards", 5); + } + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + + Request createIndex = new Request("PUT", "/" + index); + createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + client().performRequest(createIndex); + + numDocs = randomIntBetween(512, 1024); + indexRandomDocuments( + numDocs, + true, + true, + randomBoolean(), + i -> JsonXContent.contentBuilder().startObject().field("field", "value").endObject() + ); + + ensureGreen(index); // wait for source index to be available on both nodes before starting shrink + + Request updateSettingsRequest = new Request("PUT", "/" + index + "/_settings"); + updateSettingsRequest.setJsonEntity("{\"settings\": {\"index.blocks.write\": true}}"); + client().performRequest(updateSettingsRequest); + + Request shrinkIndexRequest = new Request("PUT", "/" + index + "/_shrink/" + shrunkenIndex); + + shrinkIndexRequest.setJsonEntity("{\"settings\": {\"index.number_of_shards\": 1}}"); + client().performRequest(shrinkIndexRequest); + + refreshAllIndices(); + } else { + numDocs = countOfIndexedRandomDocuments(); + } + + Map response = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))); + assertNoFailures(response); + int totalShards = (int) XContentMapValues.extractValue("_shards.total", response); + assertThat(totalShards, greaterThan(1)); + int successfulShards = (int) XContentMapValues.extractValue("_shards.successful", response); + assertEquals(totalShards, successfulShards); + int totalHits = extractTotalHits(response); + assertEquals(numDocs, totalHits); + + response = entityAsMap(client().performRequest(new Request("GET", "/" + shrunkenIndex + "/_search"))); + assertNoFailures(response); + totalShards = (int) XContentMapValues.extractValue("_shards.total", response); + assertEquals(1, totalShards); + successfulShards = (int) XContentMapValues.extractValue("_shards.successful", response); + assertEquals(1, successfulShards); + totalHits = extractTotalHits(response); + assertEquals(numDocs, totalHits); + } + + public void testShrinkAfterUpgrade() throws IOException { + String shrunkenIndex = index + "_shrunk"; + int numDocs; + if (isRunningAgainstOldCluster()) { + XContentBuilder mappingsAndSettings = jsonBuilder(); + mappingsAndSettings.startObject(); + { + mappingsAndSettings.startObject("mappings"); + { + mappingsAndSettings.startObject("properties"); + { + mappingsAndSettings.startObject("field"); + { + mappingsAndSettings.field("type", "text"); + } + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + // the default number of shards is now one so we have to set the number of shards to be more than one explicitly + 
mappingsAndSettings.startObject("settings"); + mappingsAndSettings.field("index.number_of_shards", 5); + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + + Request createIndex = new Request("PUT", "/" + index); + createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + client().performRequest(createIndex); + + numDocs = randomIntBetween(512, 1024); + indexRandomDocuments( + numDocs, + true, + true, + randomBoolean(), + i -> JsonXContent.contentBuilder().startObject().field("field", "value").endObject() + ); + } else { + ensureGreen(index); // wait for source index to be available on both nodes before starting shrink + + Request updateSettingsRequest = new Request("PUT", "/" + index + "/_settings"); + updateSettingsRequest.setJsonEntity("{\"settings\": {\"index.blocks.write\": true}}"); + client().performRequest(updateSettingsRequest); + + Request shrinkIndexRequest = new Request("PUT", "/" + index + "/_shrink/" + shrunkenIndex); + shrinkIndexRequest.setJsonEntity("{\"settings\": {\"index.number_of_shards\": 1}}"); + client().performRequest(shrinkIndexRequest); + + numDocs = countOfIndexedRandomDocuments(); + } + + refreshAllIndices(); + + Map response = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))); + assertNoFailures(response); + int totalShards = (int) XContentMapValues.extractValue("_shards.total", response); + assertThat(totalShards, greaterThan(1)); + int successfulShards = (int) XContentMapValues.extractValue("_shards.successful", response); + assertEquals(totalShards, successfulShards); + int totalHits = extractTotalHits(response); + assertEquals(numDocs, totalHits); + + if (isRunningAgainstOldCluster() == false) { + response = entityAsMap(client().performRequest(new Request("GET", "/" + shrunkenIndex + "/_search"))); + assertNoFailures(response); + totalShards = (int) XContentMapValues.extractValue("_shards.total", response); + assertEquals(1, totalShards); + successfulShards = (int) XContentMapValues.extractValue("_shards.successful", response); + assertEquals(1, successfulShards); + totalHits = extractTotalHits(response); + assertEquals(numDocs, totalHits); + } + } + + /** + * Test upgrading after a rollover. Specifically: + *
+ *  1. Create an index with a write alias
+ *  2. Write some documents to the write alias
+ *  3. Roll over the index
+ *  4. Make sure the document count is correct
+ *  5. Upgrade
+ *  6. Write some more documents to the write alias
+ *  7. Make sure the document count is correct
+ */ + public void testRollover() throws IOException { + if (isRunningAgainstOldCluster()) { + Request createIndex = new Request("PUT", "/" + index + "-000001"); + createIndex.setJsonEntity(Strings.format(""" + { + "aliases": { + "%s_write": {} + } + }""", index)); + client().performRequest(createIndex); + } + + int bulkCount = 10; + String bulk = """ + {"index":{}} + {"test":"test"} + """.repeat(bulkCount); + + Request bulkRequest = new Request("POST", "/" + index + "_write/_bulk"); + + bulkRequest.setJsonEntity(bulk); + bulkRequest.addParameter("refresh", ""); + assertThat(EntityUtils.toString(client().performRequest(bulkRequest).getEntity()), containsString("\"errors\":false")); + + if (isRunningAgainstOldCluster()) { + Request rolloverRequest = new Request("POST", "/" + index + "_write/_rollover"); + rolloverRequest.setJsonEntity(""" + { "conditions": { "max_docs": 5 }}"""); + client().performRequest(rolloverRequest); + + assertThat( + EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices?v")).getEntity()), + containsString("testrollover-000002") + ); + } + + Request countRequest = new Request("POST", "/" + index + "-*/_search"); + countRequest.addParameter("size", "0"); + Map count = entityAsMap(client().performRequest(countRequest)); + assertNoFailures(count); + + int expectedCount = bulkCount + (isRunningAgainstOldCluster() ? 0 : bulkCount); + assertEquals(expectedCount, extractTotalHits(count)); + } + + void assertCountAll(int count) throws IOException { + Map response = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))); + assertNoFailures(response); + int numDocs = extractTotalHits(response); + logger.info("Found {} in old index", numDocs); + assertEquals(count, numDocs); + } + + void assertBasicSearchWorks(int count) throws IOException { + logger.info("--> testing basic search"); + { + assertCountAll(count); + } + + logger.info("--> testing basic search with sort"); + { + Request searchRequest = new Request("GET", "/" + index + "/_search"); + searchRequest.setJsonEntity(""" + { "sort": [{ "int" : "asc" }]}"""); + Map response = entityAsMap(client().performRequest(searchRequest)); + assertNoFailures(response); + assertTotalHits(count, response); + } + + logger.info("--> testing exists filter"); + { + Request searchRequest = new Request("GET", "/" + index + "/_search"); + searchRequest.setJsonEntity(""" + { "query": { "exists" : {"field": "string"} }}"""); + Map response = entityAsMap(client().performRequest(searchRequest)); + assertNoFailures(response); + assertTotalHits(count, response); + } + + logger.info("--> testing field with dots in the name"); + { + Request searchRequest = new Request("GET", "/" + index + "/_search"); + searchRequest.setJsonEntity(""" + { "query": { "exists" : {"field": "field.with.dots"} }}"""); + Map response = entityAsMap(client().performRequest(searchRequest)); + assertNoFailures(response); + assertTotalHits(count, response); + } + } + + void assertAllSearchWorks(int count) throws IOException { + logger.info("--> testing _all search"); + Map response = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))); + assertNoFailures(response); + assertTotalHits(count, response); + Map bestHit = (Map) ((List) (XContentMapValues.extractValue("hits.hits", response))).get(0); + + // Make sure there are payloads and they are taken into account for the score + // the 'string' field has a boost of 4 in the mappings so it should get a payload boost + String stringValue = (String) 
XContentMapValues.extractValue("_source.string", bestHit); + assertNotNull(stringValue); + String id = (String) bestHit.get("_id"); + + Request explainRequest = new Request("GET", "/" + index + "/_explain/" + id); + explainRequest.setJsonEntity("{ \"query\": { \"match_all\" : {} }}"); + String explanation = toStr(client().performRequest(explainRequest)); + assertFalse("Could not find payload boost in explanation\n" + explanation, explanation.contains("payloadBoost")); + + // Make sure the query can run on the whole index + Request searchRequest = new Request("GET", "/" + index + "/_search"); + searchRequest.setEntity(explainRequest.getEntity()); + searchRequest.addParameter("explain", "true"); + Map matchAllResponse = entityAsMap(client().performRequest(searchRequest)); + assertNoFailures(matchAllResponse); + assertTotalHits(count, matchAllResponse); + } + + void assertBasicAggregationWorks() throws IOException { + // histogram on a long + Request longHistogramRequest = new Request("GET", "/" + index + "/_search"); + longHistogramRequest.setJsonEntity(""" + { + "aggs": { + "histo": { + "histogram": { + "field": "int", + "interval": 10 + } + } + } + }"""); + Map longHistogram = entityAsMap(client().performRequest(longHistogramRequest)); + assertNoFailures(longHistogram); + List histoBuckets = (List) XContentMapValues.extractValue("aggregations.histo.buckets", longHistogram); + int histoCount = 0; + for (Object entry : histoBuckets) { + Map bucket = (Map) entry; + histoCount += (Integer) bucket.get("doc_count"); + } + assertTotalHits(histoCount, longHistogram); + + // terms on a boolean + Request boolTermsRequest = new Request("GET", "/" + index + "/_search"); + boolTermsRequest.setJsonEntity(""" + { + "aggs": { + "bool_terms": { + "terms": { + "field": "bool" + } + } + } + }"""); + Map boolTerms = entityAsMap(client().performRequest(boolTermsRequest)); + List termsBuckets = (List) XContentMapValues.extractValue("aggregations.bool_terms.buckets", boolTerms); + int termsCount = 0; + for (Object entry : termsBuckets) { + Map bucket = (Map) entry; + termsCount += (Integer) bucket.get("doc_count"); + } + assertTotalHits(termsCount, boolTerms); + } + + void assertRealtimeGetWorks() throws IOException { + Request disableAutoRefresh = new Request("PUT", "/" + index + "/_settings"); + disableAutoRefresh.setJsonEntity(""" + { "index": { "refresh_interval" : -1 }}"""); + client().performRequest(disableAutoRefresh); + + Request searchRequest = new Request("GET", "/" + index + "/_search"); + searchRequest.setJsonEntity(""" + { "query": { "match_all" : {} }}"""); + Map searchResponse = entityAsMap(client().performRequest(searchRequest)); + Map hit = (Map) ((List) (XContentMapValues.extractValue("hits.hits", searchResponse))).get(0); + String docId = (String) hit.get("_id"); + + Request updateRequest = new Request("POST", "/" + index + "/_update/" + docId); + updateRequest.setJsonEntity(""" + { "doc" : { "foo": "bar"}}"""); + client().performRequest(updateRequest); + + Request getRequest = new Request("GET", "/" + index + "/_doc/" + docId); + + Map getRsp = entityAsMap(client().performRequest(getRequest)); + Map source = (Map) getRsp.get("_source"); + assertTrue("doc does not contain 'foo' key: " + source, source.containsKey("foo")); + + Request enableAutoRefresh = new Request("PUT", "/" + index + "/_settings"); + enableAutoRefresh.setJsonEntity(""" + { "index": { "refresh_interval" : "1s" }}"""); + client().performRequest(enableAutoRefresh); + } + + void assertStoredBinaryFields(int count) throws 
Exception { + Request request = new Request("GET", "/" + index + "/_search"); + request.setJsonEntity(""" + { + "query": { + "match_all": {} + }, + "size": 100, + "stored_fields": "binary" + }"""); + Map rsp = entityAsMap(client().performRequest(request)); + + assertTotalHits(count, rsp); + List hits = (List) XContentMapValues.extractValue("hits.hits", rsp); + assertEquals(100, hits.size()); + for (Object hit : hits) { + Map hitRsp = (Map) hit; + List values = (List) XContentMapValues.extractValue("fields.binary", hitRsp); + assertEquals(1, values.size()); + String value = (String) values.get(0); + byte[] binaryValue = Base64.getDecoder().decode(value); + assertEquals("Unexpected string length [" + value + "]", 16, binaryValue.length); + } + } + + static String toStr(Response response) throws IOException { + return EntityUtils.toString(response.getEntity()); + } + + static void assertNoFailures(Map response) { + int failed = (int) XContentMapValues.extractValue("_shards.failed", response); + assertEquals(0, failed); + } + + void assertTotalHits(int expectedTotalHits, Map response) { + int actualTotalHits = extractTotalHits(response); + assertEquals(response.toString(), expectedTotalHits, actualTotalHits); + } + + static int extractTotalHits(Map response) { + return (Integer) XContentMapValues.extractValue("hits.total.value", response); + } + + /** + * Tests that a single document survives. Super basic smoke test. + */ + public void testSingleDoc() throws IOException { + String docLocation = "/" + index + "/_doc/1"; + String doc = "{\"test\": \"test\"}"; + + if (isRunningAgainstOldCluster()) { + Request createDoc = new Request("PUT", docLocation); + createDoc.setJsonEntity(doc); + client().performRequest(createDoc); + } + + Request request = new Request("GET", docLocation); + assertThat(toStr(client().performRequest(request)), containsString(doc)); + } + + /** + * Tests that a single empty shard index is correctly recovered. Empty shards are often an edge case. + */ + public void testEmptyShard() throws IOException { + final String indexName = "test_empty_shard"; + + if (isRunningAgainstOldCluster()) { + Settings.Builder settings = Settings.builder() + .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + // if the node with the replica is the first to be restarted, while a replica is still recovering + // then delayed allocation will kick in. When the node comes back, the master will search for a copy + // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN + // before timing out + .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") + .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster + createIndex(indexName, settings.build()); + } + ensureGreen(indexName); + } + + /** + * Tests recovery of an index with or without a translog and the + * statistics we gather about that. + */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/52031") + public void testRecovery() throws Exception { + int count; + boolean shouldHaveTranslog; + if (isRunningAgainstOldCluster()) { + count = between(200, 300); + /* We've had bugs in the past where we couldn't restore + * an index without a translog so we randomize whether + * or not we have one. 
*/ + shouldHaveTranslog = randomBoolean(); + Settings.Builder settings = Settings.builder(); + if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); + } + final String mappings = randomBoolean() ? "\"_source\": { \"enabled\": false}" : null; + createIndex(index, settings.build(), mappings); + indexRandomDocuments(count, true, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject()); + + // make sure all recoveries are done + ensureGreen(index); + + // Force flush so we're sure that all translog are committed + Request flushRequest = new Request("POST", "/" + index + "/_flush"); + flushRequest.addParameter("force", "true"); + flushRequest.addParameter("wait_if_ongoing", "true"); + assertOK(client().performRequest(flushRequest)); + + if (shouldHaveTranslog) { + // Update a few documents so we are sure to have a translog + indexRandomDocuments( + count / 10, + false, // flushing here would invalidate the whole thing + false, + true, + i -> jsonBuilder().startObject().field("field", "value").endObject() + ); + } + saveInfoDocument(index + "_should_have_translog", Boolean.toString(shouldHaveTranslog)); + } else { + count = countOfIndexedRandomDocuments(); + shouldHaveTranslog = Booleans.parseBoolean(loadInfoDocument(index + "_should_have_translog")); + } + + // Count the documents in the index to make sure we have as many as we put there + Request countRequest = new Request("GET", "/" + index + "/_search"); + countRequest.addParameter("size", "0"); + refreshAllIndices(); + Map countResponse = entityAsMap(client().performRequest(countRequest)); + assertTotalHits(count, countResponse); + + if (false == isRunningAgainstOldCluster()) { + boolean restoredFromTranslog = false; + boolean foundPrimary = false; + Request recoveryRequest = new Request("GET", "/_cat/recovery/" + index); + recoveryRequest.addParameter("h", "index,shard,type,stage,translog_ops_recovered"); + recoveryRequest.addParameter("s", "index,shard,type"); + String recoveryResponse = toStr(client().performRequest(recoveryRequest)); + for (String line : recoveryResponse.split("\n")) { + // Find the primaries + foundPrimary = true; + if (false == line.contains("done") && line.contains("existing_store")) { + continue; + } + /* Mark if we see a primary that looked like it restored from the translog. + * Not all primaries will look like this all the time because we modify + * random documents when we want there to be a translog and they might + * not be spread around all the shards. 
*/ + Matcher m = Pattern.compile("(\\d+)$").matcher(line); + assertTrue(line, m.find()); + int translogOps = Integer.parseInt(m.group(1)); + if (translogOps > 0) { + restoredFromTranslog = true; + } + } + assertTrue("expected to find a primary but didn't\n" + recoveryResponse, foundPrimary); + assertEquals("mismatch while checking for translog recovery\n" + recoveryResponse, shouldHaveTranslog, restoredFromTranslog); + + String currentLuceneVersion = Version.CURRENT.luceneVersion.toString(); + String bwcLuceneVersion = getOldClusterVersion().luceneVersion.toString(); + String minCompatibleBWCVersion = Version.CURRENT.minimumCompatibilityVersion().luceneVersion.toString(); + if (shouldHaveTranslog && false == currentLuceneVersion.equals(bwcLuceneVersion)) { + int numCurrentVersion = 0; + int numBwcVersion = 0; + Request segmentsRequest = new Request("GET", "/_cat/segments/" + index); + segmentsRequest.addParameter("h", "prirep,shard,index,version"); + segmentsRequest.addParameter("s", "prirep,shard,index"); + String segmentsResponse = toStr(client().performRequest(segmentsRequest)); + for (String line : segmentsResponse.split("\n")) { + if (false == line.startsWith("p")) { + continue; + } + Matcher m = Pattern.compile("(\\d+\\.\\d+\\.\\d+)$").matcher(line); + assertTrue(line, m.find()); + String version = m.group(1); + if (currentLuceneVersion.equals(version)) { + numCurrentVersion++; + } else if (bwcLuceneVersion.equals(version)) { + numBwcVersion++; + } else if (minCompatibleBWCVersion.equals(version) && minCompatibleBWCVersion.equals(bwcLuceneVersion) == false) { + // Our upgrade path from 7.non-last always goes through 7.last, which depending on timing can create 7.last + // index segment. We ignore those. + continue; + } else { + fail("expected version to be one of [" + currentLuceneVersion + "," + bwcLuceneVersion + "] but was " + line); + } + } + assertNotEquals( + "expected at least 1 current segment after translog recovery. segments:\n" + segmentsResponse, + 0, + numCurrentVersion + ); + assertNotEquals("expected at least 1 old segment. segments:\n" + segmentsResponse, 0, numBwcVersion); + } + } + } + + /** + * Tests snapshot/restore by creating a snapshot and restoring it. It takes + * a snapshot on the old cluster and restores it on the old cluster as a + * sanity check and on the new cluster as an upgrade test. It also takes a + * snapshot on the new cluster and restores that on the new cluster as a + * test that the repository is ok with containing snapshot from both the + * old and new versions. All of the snapshots include an index, a template, + * and some routing configuration. 
+ */ + public void testSnapshotRestore() throws IOException { + int count; + if (isRunningAgainstOldCluster()) { + // Create the index + count = between(200, 300); + Settings.Builder settings = Settings.builder(); + if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); + } + createIndex(index, settings.build()); + indexRandomDocuments(count, true, true, randomBoolean(), i -> jsonBuilder().startObject().field("field", "value").endObject()); + } else { + count = countOfIndexedRandomDocuments(); + } + + // Refresh the index so the count doesn't fail + refreshAllIndices(); + + // Count the documents in the index to make sure we have as many as we put there + Request countRequest = new Request("GET", "/" + index + "/_search"); + countRequest.addParameter("size", "0"); + Map countResponse = entityAsMap(client().performRequest(countRequest)); + assertTotalHits(count, countResponse); + + // Stick a routing attribute into to cluster settings so we can see it after the restore + Request addRoutingSettings = new Request("PUT", "/_cluster/settings"); + addRoutingSettings.setJsonEntity(Strings.format(""" + {"persistent": {"cluster.routing.allocation.exclude.test_attr": "%s"}} + """, getOldClusterVersion())); + client().performRequest(addRoutingSettings); + + // Stick a template into the cluster so we can see it after the restore + XContentBuilder templateBuilder = JsonXContent.contentBuilder().startObject(); + templateBuilder.field("index_patterns", "evil_*"); // Don't confuse other tests by applying the template + templateBuilder.startObject("settings"); + { + templateBuilder.field("number_of_shards", 1); + } + templateBuilder.endObject(); + templateBuilder.startObject("mappings"); + { + { + templateBuilder.startObject("_source"); + { + templateBuilder.field("enabled", true); + } + templateBuilder.endObject(); + } + } + templateBuilder.endObject(); + templateBuilder.startObject("aliases"); + { + templateBuilder.startObject("alias1").endObject(); + templateBuilder.startObject("alias2"); + { + templateBuilder.startObject("filter"); + { + templateBuilder.startObject("term"); + { + templateBuilder.field("version", isRunningAgainstOldCluster() ? getOldClusterVersion() : Version.CURRENT); + } + templateBuilder.endObject(); + } + templateBuilder.endObject(); + } + templateBuilder.endObject(); + } + templateBuilder.endObject().endObject(); + Request createTemplateRequest = new Request("PUT", "/_template/test_template"); + createTemplateRequest.setJsonEntity(Strings.toString(templateBuilder)); + createTemplateRequest.setOptions(expectWarnings(RestPutIndexTemplateAction.DEPRECATION_WARNING)); + + client().performRequest(createTemplateRequest); + + if (isRunningAgainstOldCluster()) { + // Create the repo + XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); + { + repoConfig.field("type", "fs"); + repoConfig.startObject("settings"); + { + repoConfig.field("compress", randomBoolean()); + repoConfig.field("location", repoDirectory.getRoot().getPath()); + } + repoConfig.endObject(); + } + repoConfig.endObject(); + Request createRepoRequest = new Request("PUT", "/_snapshot/repo"); + createRepoRequest.setJsonEntity(Strings.toString(repoConfig)); + client().performRequest(createRepoRequest); + } + + Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + (isRunningAgainstOldCluster() ? 
"old_snap" : "new_snap")); + createSnapshot.addParameter("wait_for_completion", "true"); + createSnapshot.setJsonEntity("{\"indices\": \"" + index + "\"}"); + client().performRequest(createSnapshot); + + checkSnapshot("old_snap", count, getOldClusterVersion()); + if (false == isRunningAgainstOldCluster()) { + checkSnapshot("new_snap", count, Version.CURRENT); + } + } + + public void testHistoryUUIDIsAdded() throws Exception { + if (isRunningAgainstOldCluster()) { + XContentBuilder mappingsAndSettings = jsonBuilder(); + mappingsAndSettings.startObject(); + { + mappingsAndSettings.startObject("settings"); + mappingsAndSettings.field("number_of_shards", 1); + mappingsAndSettings.field("number_of_replicas", 1); + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + Request createIndex = new Request("PUT", "/" + index); + createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + client().performRequest(createIndex); + } else { + ensureGreenLongWait(index); + + Request statsRequest = new Request("GET", index + "/_stats"); + statsRequest.addParameter("level", "shards"); + Response response = client().performRequest(statsRequest); + List shardStats = ObjectPath.createFromResponse(response).evaluate("indices." + index + ".shards.0"); + assertThat(shardStats, notNullValue()); + assertThat("Expected stats for 2 shards", shardStats, hasSize(2)); + String globalHistoryUUID = null; + for (Object shard : shardStats) { + final String nodeId = ObjectPath.evaluate(shard, "routing.node"); + final Boolean primary = ObjectPath.evaluate(shard, "routing.primary"); + logger.info("evaluating: {} , {}", ObjectPath.evaluate(shard, "routing"), ObjectPath.evaluate(shard, "commit")); + String historyUUID = ObjectPath.evaluate(shard, "commit.user_data.history_uuid"); + assertThat("no history uuid found on " + nodeId + " (primary: " + primary + ")", historyUUID, notNullValue()); + if (globalHistoryUUID == null) { + globalHistoryUUID = historyUUID; + } else { + assertThat( + "history uuid mismatch on " + nodeId + " (primary: " + primary + ")", + historyUUID, + equalTo(globalHistoryUUID) + ); + } + } + } + } + + public void testSoftDeletes() throws Exception { + if (isRunningAgainstOldCluster()) { + XContentBuilder mappingsAndSettings = jsonBuilder(); + mappingsAndSettings.startObject(); + { + mappingsAndSettings.startObject("settings"); + mappingsAndSettings.field("number_of_shards", 1); + mappingsAndSettings.field("number_of_replicas", 1); + if (randomBoolean()) { + mappingsAndSettings.field("soft_deletes.enabled", true); + } + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + Request createIndex = new Request("PUT", "/" + index); + createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + client().performRequest(createIndex); + int numDocs = between(10, 100); + for (int i = 0; i < numDocs; i++) { + String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v1").endObject()); + Request request = new Request("POST", "/" + index + "/_doc/" + i); + request.setJsonEntity(doc); + client().performRequest(request); + refreshAllIndices(); + } + client().performRequest(new Request("POST", "/" + index + "/_flush")); + int liveDocs = numDocs; + assertTotalHits(liveDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search")))); + for (int i = 0; i < numDocs; i++) { + if (randomBoolean()) { + String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v2").endObject()); + Request 
request = new Request("POST", "/" + index + "/_doc/" + i); + request.setJsonEntity(doc); + client().performRequest(request); + } else if (randomBoolean()) { + client().performRequest(new Request("DELETE", "/" + index + "/_doc/" + i)); + liveDocs--; + } + } + refreshAllIndices(); + assertTotalHits(liveDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search")))); + saveInfoDocument(index + "_doc_count", Integer.toString(liveDocs)); + } else { + int liveDocs = Integer.parseInt(loadInfoDocument(index + "_doc_count")); + assertTotalHits(liveDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search")))); + } + } + + /** + * This test creates an index in the old cluster and then closes it. When the cluster is fully restarted in a newer version, + * it verifies that the index exists and is replicated if the old version supports replication. + */ + public void testClosedIndices() throws Exception { + if (isRunningAgainstOldCluster()) { + createIndex(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build()); + ensureGreen(index); + + int numDocs = 0; + if (randomBoolean()) { + numDocs = between(1, 100); + for (int i = 0; i < numDocs; i++) { + final Request request = new Request("POST", "/" + index + "/_doc/" + i); + request.setJsonEntity(Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v1").endObject())); + assertOK(client().performRequest(request)); + if (rarely()) { + refreshAllIndices(); + } + } + refreshAllIndices(); + } + + assertTotalHits(numDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search")))); + saveInfoDocument(index + "_doc_count", Integer.toString(numDocs)); + closeIndex(index); + } + + if (getOldClusterVersion().onOrAfter(Version.V_7_2_0)) { + ensureGreenLongWait(index); + assertClosedIndex(index, true); + } else { + assertClosedIndex(index, false); + } + + if (isRunningAgainstOldCluster() == false) { + openIndex(index); + ensureGreen(index); + + final int expectedNumDocs = Integer.parseInt(loadInfoDocument(index + "_doc_count")); + assertTotalHits(expectedNumDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search")))); + } + } + + /** + * Asserts that an index is closed in the cluster state. If `checkRoutingTable` is true, it also asserts + * that the index has started shards. + */ + @SuppressWarnings("unchecked") + private void assertClosedIndex(final String indexName, final boolean checkRoutingTable) throws IOException { + final Map state = entityAsMap(client().performRequest(new Request("GET", "/_cluster/state"))); + + final Map metadata = (Map) XContentMapValues.extractValue("metadata.indices." + indexName, state); + assertThat(metadata, notNullValue()); + assertThat(metadata.get("state"), equalTo("close")); + + final Map blocks = (Map) XContentMapValues.extractValue("blocks.indices." + indexName, state); + assertThat(blocks, notNullValue()); + assertThat(blocks.containsKey(String.valueOf(MetadataIndexStateService.INDEX_CLOSED_BLOCK_ID)), is(true)); + + final Map settings = (Map) XContentMapValues.extractValue("settings", metadata); + assertThat(settings, notNullValue()); + + final Map routingTable = (Map) XContentMapValues.extractValue( + "routing_table.indices." 
+ indexName, + state + ); + if (checkRoutingTable) { + assertThat(routingTable, notNullValue()); + assertThat(Booleans.parseBoolean((String) XContentMapValues.extractValue("index.verified_before_close", settings)), is(true)); + final String numberOfShards = (String) XContentMapValues.extractValue("index.number_of_shards", settings); + assertThat(numberOfShards, notNullValue()); + final int nbShards = Integer.parseInt(numberOfShards); + assertThat(nbShards, greaterThanOrEqualTo(1)); + + for (int i = 0; i < nbShards; i++) { + final Collection> shards = (Collection>) XContentMapValues.extractValue( + "shards." + i, + routingTable + ); + assertThat(shards, notNullValue()); + assertThat(shards.size(), equalTo(2)); + for (Map shard : shards) { + assertThat(XContentMapValues.extractValue("shard", shard), equalTo(i)); + assertThat(XContentMapValues.extractValue("state", shard), equalTo("STARTED")); + assertThat(XContentMapValues.extractValue("index", shard), equalTo(indexName)); + } + } + } else { + assertThat(routingTable, nullValue()); + assertThat(XContentMapValues.extractValue("index.verified_before_close", settings), nullValue()); + } + } + + @SuppressWarnings("unchecked") + private void checkSnapshot(final String snapshotName, final int count, final Version tookOnVersion) throws IOException { + // Check the snapshot metadata, especially the version + Request listSnapshotRequest = new Request("GET", "/_snapshot/repo/" + snapshotName); + Map snapResponse = entityAsMap(client().performRequest(listSnapshotRequest)); + + assertEquals(singletonList(snapshotName), XContentMapValues.extractValue("snapshots.snapshot", snapResponse)); + assertEquals(singletonList("SUCCESS"), XContentMapValues.extractValue("snapshots.state", snapResponse)); + assertEquals(singletonList(tookOnVersion.toString()), XContentMapValues.extractValue("snapshots.version", snapResponse)); + + // Remove the routing setting and template so we can test restoring them. 
+ Request clearRoutingFromSettings = new Request("PUT", "/_cluster/settings"); + clearRoutingFromSettings.setJsonEntity(""" + {"persistent":{"cluster.routing.allocation.exclude.test_attr": null}}"""); + client().performRequest(clearRoutingFromSettings); + client().performRequest(new Request("DELETE", "/_template/test_template")); + + // Restore + XContentBuilder restoreCommand = JsonXContent.contentBuilder().startObject(); + restoreCommand.field("include_global_state", true); + restoreCommand.field("indices", index); + restoreCommand.field("rename_pattern", index); + restoreCommand.field("rename_replacement", "restored_" + index); + restoreCommand.endObject(); + Request restoreRequest = new Request("POST", "/_snapshot/repo/" + snapshotName + "/_restore"); + restoreRequest.addParameter("wait_for_completion", "true"); + restoreRequest.setJsonEntity(Strings.toString(restoreCommand)); + client().performRequest(restoreRequest); + + // Make sure search finds all documents + Request countRequest = new Request("GET", "/restored_" + index + "/_search"); + countRequest.addParameter("size", "0"); + Map countResponse = entityAsMap(client().performRequest(countRequest)); + assertTotalHits(count, countResponse); + + // Add some extra documents to the index to be sure we can still write to it after restoring it + int extras = between(1, 100); + StringBuilder bulk = new StringBuilder(); + for (int i = 0; i < extras; i++) { + bulk.append(Strings.format(""" + {"index":{"_id":"%s"}} + {"test":"test"} + """, count + i)); + } + + Request writeToRestoredRequest = new Request("POST", "/restored_" + index + "/_bulk"); + + writeToRestoredRequest.addParameter("refresh", "true"); + writeToRestoredRequest.setJsonEntity(bulk.toString()); + assertThat(EntityUtils.toString(client().performRequest(writeToRestoredRequest).getEntity()), containsString("\"errors\":false")); + + // And count to make sure the add worked + // Make sure search finds all documents + Request countAfterWriteRequest = new Request("GET", "/restored_" + index + "/_search"); + countAfterWriteRequest.addParameter("size", "0"); + Map countAfterResponse = entityAsMap(client().performRequest(countRequest)); + assertTotalHits(count + extras, countAfterResponse); + + // Clean up the index for the next iteration + client().performRequest(new Request("DELETE", "/restored_*")); + + // Check settings added by the restore process + Request clusterSettingsRequest = new Request("GET", "/_cluster/settings"); + clusterSettingsRequest.addParameter("flat_settings", "true"); + Map clusterSettingsResponse = entityAsMap(client().performRequest(clusterSettingsRequest)); + @SuppressWarnings("unchecked") + final Map persistentSettings = (Map) clusterSettingsResponse.get("persistent"); + assertThat(persistentSettings.get("cluster.routing.allocation.exclude.test_attr"), equalTo(getOldClusterVersion().toString())); + + // Check that the template was restored successfully + Request getTemplateRequest = new Request("GET", "/_template/test_template"); + + Map getTemplateResponse = entityAsMap(client().performRequest(getTemplateRequest)); + Map expectedTemplate = new HashMap<>(); + expectedTemplate.put("index_patterns", singletonList("evil_*")); + + expectedTemplate.put("settings", singletonMap("index", singletonMap("number_of_shards", "1"))); + expectedTemplate.put("mappings", singletonMap("_source", singletonMap("enabled", true))); + + expectedTemplate.put("order", 0); + Map aliases = new HashMap<>(); + aliases.put("alias1", emptyMap()); + aliases.put("alias2", 
singletonMap("filter", singletonMap("term", singletonMap("version", tookOnVersion.toString())))); + expectedTemplate.put("aliases", aliases); + expectedTemplate = singletonMap("test_template", expectedTemplate); + if (false == expectedTemplate.equals(getTemplateResponse)) { + NotEqualMessageBuilder builder = new NotEqualMessageBuilder(); + builder.compareMaps(getTemplateResponse, expectedTemplate); + logger.info("expected: {}\nactual:{}", expectedTemplate, getTemplateResponse); + fail("template doesn't match:\n" + builder.toString()); + } + } + + private void indexRandomDocuments( + final int count, + final boolean flushAllowed, + final boolean saveInfo, + final boolean specifyId, + final CheckedFunction docSupplier + ) throws IOException { + logger.info("Indexing {} random documents", count); + for (int i = 0; i < count; i++) { + logger.debug("Indexing document [{}]", i); + Request createDocument = new Request("POST", "/" + index + "/_doc/" + (specifyId ? i : "")); + createDocument.setJsonEntity(Strings.toString(docSupplier.apply(i))); + client().performRequest(createDocument); + if (rarely()) { + refreshAllIndices(); + } + if (flushAllowed && rarely()) { + logger.debug("Flushing [{}]", index); + client().performRequest(new Request("POST", "/" + index + "/_flush")); + } + } + if (saveInfo) { + saveInfoDocument(index + "_count", Integer.toString(count)); + } + } + + private void indexDocument(String id) throws IOException { + final Request indexRequest = new Request("POST", "/" + index + "/" + "_doc/" + id); + indexRequest.setJsonEntity(Strings.toString(JsonXContent.contentBuilder().startObject().field("f", "v").endObject())); + assertOK(client().performRequest(indexRequest)); + } + + private int countOfIndexedRandomDocuments() throws IOException { + return Integer.parseInt(loadInfoDocument(index + "_count")); + } + + private void saveInfoDocument(String id, String value) throws IOException { + XContentBuilder infoDoc = JsonXContent.contentBuilder().startObject(); + infoDoc.field("value", value); + infoDoc.endObject(); + // Only create the first version so we know how many documents are created when the index is first created + Request request = new Request("PUT", "/info/_doc/" + id); + request.addParameter("op_type", "create"); + request.setJsonEntity(Strings.toString(infoDoc)); + client().performRequest(request); + } + + private String loadInfoDocument(String id) throws IOException { + Request request = new Request("GET", "/info/_doc/" + id); + request.addParameter("filter_path", "_source"); + String doc = toStr(client().performRequest(request)); + Matcher m = Pattern.compile("\"value\":\"(.+)\"").matcher(doc); + assertTrue(doc, m.find()); + return m.group(1); + } + + private List dataNodes(String indexName, RestClient client) throws IOException { + Request request = new Request("GET", indexName + "/_stats"); + request.addParameter("level", "shards"); + Response response = client.performRequest(request); + List nodes = new ArrayList<>(); + List shardStats = ObjectPath.createFromResponse(response).evaluate("indices." + indexName + ".shards.0"); + for (Object shard : shardStats) { + final String nodeId = ObjectPath.evaluate(shard, "routing.node"); + nodes.add(nodeId); + } + return nodes; + } + + /** + * Wait for an index to have green health, waiting longer than + * {@link ESRestTestCase#ensureGreen}. 
+ */ + protected void ensureGreenLongWait(String indexName) throws IOException { + Request request = new Request("GET", "/_cluster/health/" + indexName); + request.addParameter("timeout", "2m"); + request.addParameter("wait_for_status", "green"); + request.addParameter("wait_for_no_relocating_shards", "true"); + request.addParameter("wait_for_events", "languid"); + request.addParameter("level", "shards"); + Map healthRsp = entityAsMap(client().performRequest(request)); + logger.info("health api response: {}", healthRsp); + assertEquals("green", healthRsp.get("status")); + assertFalse((Boolean) healthRsp.get("timed_out")); + } + + public void testPeerRecoveryRetentionLeases() throws Exception { + if (isRunningAgainstOldCluster()) { + XContentBuilder settings = jsonBuilder(); + settings.startObject(); + { + settings.startObject("settings"); + settings.field("number_of_shards", between(1, 5)); + settings.field("number_of_replicas", between(0, 1)); + settings.endObject(); + } + settings.endObject(); + + Request createIndex = new Request("PUT", "/" + index); + createIndex.setJsonEntity(Strings.toString(settings)); + client().performRequest(createIndex); + } + ensureGreen(index); + ensurePeerRecoveryRetentionLeasesRenewedAndSynced(index); + } + + /** + * Tests that with or without soft-deletes, we should perform an operation-based recovery if there were some + * but not too many uncommitted documents (i.e., less than 10% of committed documents or the extra translog) + * before we restart the cluster. This is important when we move from translog based to retention leases based + * peer recoveries. + */ + public void testOperationBasedRecovery() throws Exception { + if (isRunningAgainstOldCluster()) { + Settings.Builder settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1); + if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); + } + final String mappings = randomBoolean() ? "\"_source\": { \"enabled\": false}" : null; + createIndex(index, settings.build(), mappings); + ensureGreen(index); + int committedDocs = randomIntBetween(100, 200); + for (int i = 0; i < committedDocs; i++) { + indexDocument(Integer.toString(i)); + if (rarely()) { + flush(index, randomBoolean()); + } + } + flush(index, true); + ensurePeerRecoveryRetentionLeasesRenewedAndSynced(index); + // less than 10% of the committed docs (see IndexSetting#FILE_BASED_RECOVERY_THRESHOLD_SETTING). + int uncommittedDocs = randomIntBetween(0, (int) (committedDocs * 0.1)); + for (int i = 0; i < uncommittedDocs; i++) { + final String id = Integer.toString(randomIntBetween(1, 100)); + indexDocument(id); + } + } else { + ensureGreen(index); + assertNoFileBasedRecovery(index, n -> true); + ensurePeerRecoveryRetentionLeasesRenewedAndSynced(index); + } + } + + /** + * Verifies that once all shard copies on the new version, we should turn off the translog retention for indices with soft-deletes. 
+ */ + public void testTurnOffTranslogRetentionAfterUpgraded() throws Exception { + if (isRunningAgainstOldCluster()) { + createIndex( + index, + Settings.builder() + .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .build() + ); + ensureGreen(index); + int numDocs = randomIntBetween(10, 100); + for (int i = 0; i < numDocs; i++) { + indexDocument(Integer.toString(randomIntBetween(1, 100))); + if (rarely()) { + flush(index, randomBoolean()); + } + } + } else { + ensureGreen(index); + flush(index, true); + assertEmptyTranslog(index); + ensurePeerRecoveryRetentionLeasesRenewedAndSynced(index); + } + } + + public void testResize() throws Exception { + int numDocs; + if (isRunningAgainstOldCluster()) { + final Settings.Builder settings = Settings.builder() + .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 3) + .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1); + if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { + settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false); + } + final String mappings = randomBoolean() ? "\"_source\": { \"enabled\": false}" : null; + createIndex(index, settings.build(), mappings); + numDocs = randomIntBetween(10, 1000); + for (int i = 0; i < numDocs; i++) { + indexDocument(Integer.toString(i)); + if (rarely()) { + flush(index, randomBoolean()); + } + } + saveInfoDocument("num_doc_" + index, Integer.toString(numDocs)); + ensureGreen(index); + } else { + ensureGreen(index); + numDocs = Integer.parseInt(loadInfoDocument("num_doc_" + index)); + int moreDocs = randomIntBetween(0, 100); + for (int i = 0; i < moreDocs; i++) { + indexDocument(Integer.toString(numDocs + i)); + if (rarely()) { + flush(index, randomBoolean()); + } + } + Request updateSettingsRequest = new Request("PUT", "/" + index + "/_settings"); + updateSettingsRequest.setJsonEntity("{\"settings\": {\"index.blocks.write\": true}}"); + client().performRequest(updateSettingsRequest); + { + final String target = index + "_shrunken"; + Request shrinkRequest = new Request("PUT", "/" + index + "/_shrink/" + target); + Settings.Builder settings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1); + if (randomBoolean()) { + settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); + } + shrinkRequest.setJsonEntity("{\"settings\":" + Strings.toString(settings.build()) + "}"); + client().performRequest(shrinkRequest); + ensureGreenLongWait(target); + assertNumHits(target, numDocs + moreDocs, 1); + } + { + final String target = index + "_split"; + Settings.Builder settings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 6); + if (randomBoolean()) { + settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); + } + Request splitRequest = new Request("PUT", "/" + index + "/_split/" + target); + splitRequest.setJsonEntity("{\"settings\":" + Strings.toString(settings.build()) + "}"); + client().performRequest(splitRequest); + ensureGreenLongWait(target); + assertNumHits(target, numDocs + moreDocs, 6); + } + { + final String target = index + "_cloned"; + client().performRequest(new Request("PUT", "/" + index + "/_clone/" + target)); + ensureGreenLongWait(target); + assertNumHits(target, numDocs + moreDocs, 3); + } + } + } + + @SuppressWarnings("unchecked") + public void testSystemIndexMetadataIsUpgraded() throws Exception { + assumeTrue(".tasks became a system 
index in 7.10.0", getOldClusterVersion().onOrAfter(Version.V_7_10_0)); + final String systemIndexWarning = "this request accesses system indices: [.tasks], but in a future major version, direct " + + "access to system indices will be prevented by default"; + if (isRunningAgainstOldCluster()) { + // create index + Request createTestIndex = new Request("PUT", "/test_index_old"); + createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}"); + client().performRequest(createTestIndex); + + Request bulk = new Request("POST", "/_bulk"); + bulk.addParameter("refresh", "true"); + bulk.setJsonEntity(""" + {"index": {"_index": "test_index_old"}} + {"f1": "v1", "f2": "v2"} + """); + client().performRequest(bulk); + + // start a async reindex job + Request reindex = new Request("POST", "/_reindex"); + reindex.setJsonEntity(""" + { + "source":{ + "index":"test_index_old" + }, + "dest":{ + "index":"test_index_reindex" + } + }"""); + reindex.addParameter("wait_for_completion", "false"); + Map response = entityAsMap(client().performRequest(reindex)); + String taskId = (String) response.get("task"); + + // wait for task + Request getTask = new Request("GET", "/_tasks/" + taskId); + getTask.addParameter("wait_for_completion", "true"); + client().performRequest(getTask); + + // make sure .tasks index exists + Request getTasksIndex = new Request("GET", "/.tasks"); + getTasksIndex.setOptions(expectVersionSpecificWarnings(v -> { + v.current(systemIndexWarning); + v.compatible(systemIndexWarning); + })); + getTasksIndex.addParameter("allow_no_indices", "false"); + + getTasksIndex.setOptions(expectVersionSpecificWarnings(v -> { + v.current(systemIndexWarning); + v.compatible(systemIndexWarning); + })); + assertBusy(() -> { + try { + assertThat(client().performRequest(getTasksIndex).getStatusLine().getStatusCode(), is(200)); + } catch (ResponseException e) { + throw new AssertionError(".tasks index does not exist yet"); + } + }); + + // If we are on 7.x create an alias that includes both a system index and a non-system index so we can be sure it gets + // upgraded properly. If we're already on 8.x, skip this part of the test. 
+ if (minimumNodeVersion().before(SYSTEM_INDEX_ENFORCEMENT_VERSION)) { + // Create an alias to make sure it gets upgraded properly + Request putAliasRequest = new Request("POST", "/_aliases"); + putAliasRequest.setJsonEntity(""" + { + "actions": [ + {"add": {"index": ".tasks", "alias": "test-system-alias"}}, + {"add": {"index": "test_index_reindex", "alias": "test-system-alias"}} + ] + }"""); + putAliasRequest.setOptions(expectVersionSpecificWarnings(v -> { + v.current(systemIndexWarning); + v.compatible(systemIndexWarning); + })); + assertThat(client().performRequest(putAliasRequest).getStatusLine().getStatusCode(), is(200)); + } + } else { + assertBusy(() -> { + Request clusterStateRequest = new Request("GET", "/_cluster/state/metadata"); + Map indices = new XContentTestUtils.JsonMapView(entityAsMap(client().performRequest(clusterStateRequest))) + .get("metadata.indices"); + + // Make sure our non-system index is still non-system + assertThat(new XContentTestUtils.JsonMapView(indices).get("test_index_old.system"), is(false)); + + // Can't get the .tasks index via JsonMapView because it splits on `.` + assertThat(indices, hasKey(".tasks")); + XContentTestUtils.JsonMapView tasksIndex = new XContentTestUtils.JsonMapView((Map) indices.get(".tasks")); + assertThat(tasksIndex.get("system"), is(true)); + + // If .tasks was created in a 7.x version, it should have an alias on it that we need to make sure got upgraded properly. + final String tasksCreatedVersionString = tasksIndex.get("settings.index.version.created"); + assertThat(tasksCreatedVersionString, notNullValue()); + final Version tasksCreatedVersion = Version.fromId(Integer.parseInt(tasksCreatedVersionString)); + if (tasksCreatedVersion.before(SYSTEM_INDEX_ENFORCEMENT_VERSION)) { + // Verify that the alias survived the upgrade + Request getAliasRequest = new Request("GET", "/_alias/test-system-alias"); + getAliasRequest.setOptions(expectVersionSpecificWarnings(v -> { + v.current(systemIndexWarning); + v.compatible(systemIndexWarning); + })); + Map aliasResponse = entityAsMap(client().performRequest(getAliasRequest)); + assertThat(aliasResponse, hasKey(".tasks")); + assertThat(aliasResponse, hasKey("test_index_reindex")); + } + }); + } + } + + public void testEnableSoftDeletesOnRestore() throws Exception { + assumeTrue("soft deletes must be enabled on 8.0+", getOldClusterVersion().before(Version.V_8_0_0)); + final String snapshot = "snapshot-" + index; + if (isRunningAgainstOldCluster()) { + final Settings.Builder settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1); + settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false); + createIndex(index, settings.build()); + ensureGreen(index); + int numDocs = randomIntBetween(0, 100); + indexRandomDocuments( + numDocs, + true, + true, + randomBoolean(), + i -> jsonBuilder().startObject().field("field", "value").endObject() + ); + // create repo + XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); + { + repoConfig.field("type", "fs"); + repoConfig.startObject("settings"); + { + repoConfig.field("compress", randomBoolean()); + repoConfig.field("location", repoDirectory.getRoot().getPath()); + } + repoConfig.endObject(); + } + repoConfig.endObject(); + Request createRepoRequest = new Request("PUT", "/_snapshot/repo"); + createRepoRequest.setJsonEntity(Strings.toString(repoConfig)); + client().performRequest(createRepoRequest); + // create snapshot + Request createSnapshot = new 
Request("PUT", "/_snapshot/repo/" + snapshot); + createSnapshot.addParameter("wait_for_completion", "true"); + createSnapshot.setJsonEntity("{\"indices\": \"" + index + "\"}"); + client().performRequest(createSnapshot); + } else { + String restoredIndex = "restored-" + index; + // Restore + XContentBuilder restoreCommand = JsonXContent.contentBuilder().startObject(); + restoreCommand.field("indices", index); + restoreCommand.field("rename_pattern", index); + restoreCommand.field("rename_replacement", restoredIndex); + restoreCommand.startObject("index_settings"); + { + restoreCommand.field("index.soft_deletes.enabled", true); + } + restoreCommand.endObject(); + restoreCommand.endObject(); + Request restoreRequest = new Request("POST", "/_snapshot/repo/" + snapshot + "/_restore"); + restoreRequest.addParameter("wait_for_completion", "true"); + restoreRequest.setJsonEntity(Strings.toString(restoreCommand)); + client().performRequest(restoreRequest); + ensureGreen(restoredIndex); + int numDocs = countOfIndexedRandomDocuments(); + assertTotalHits(numDocs, entityAsMap(client().performRequest(new Request("GET", "/" + restoredIndex + "/_search")))); + } + } + + public void testForbidDisableSoftDeletesOnRestore() throws Exception { + final String snapshot = "snapshot-" + index; + if (isRunningAgainstOldCluster()) { + final Settings.Builder settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); + createIndex(index, settings.build()); + ensureGreen(index); + int numDocs = randomIntBetween(0, 100); + indexRandomDocuments( + numDocs, + true, + true, + randomBoolean(), + i -> jsonBuilder().startObject().field("field", "value").endObject() + ); + // create repo + XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); + { + repoConfig.field("type", "fs"); + repoConfig.startObject("settings"); + { + repoConfig.field("compress", randomBoolean()); + repoConfig.field("location", repoDirectory.getRoot().getPath()); + } + repoConfig.endObject(); + } + repoConfig.endObject(); + Request createRepoRequest = new Request("PUT", "/_snapshot/repo"); + createRepoRequest.setJsonEntity(Strings.toString(repoConfig)); + client().performRequest(createRepoRequest); + // create snapshot + Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + snapshot); + createSnapshot.addParameter("wait_for_completion", "true"); + createSnapshot.setJsonEntity("{\"indices\": \"" + index + "\"}"); + client().performRequest(createSnapshot); + } else { + // Restore + XContentBuilder restoreCommand = JsonXContent.contentBuilder().startObject(); + restoreCommand.field("indices", index); + restoreCommand.field("rename_pattern", index); + restoreCommand.field("rename_replacement", "restored-" + index); + restoreCommand.startObject("index_settings"); + { + restoreCommand.field("index.soft_deletes.enabled", false); + } + restoreCommand.endObject(); + restoreCommand.endObject(); + Request restoreRequest = new Request("POST", "/_snapshot/repo/" + snapshot + "/_restore"); + restoreRequest.addParameter("wait_for_completion", "true"); + restoreRequest.setJsonEntity(Strings.toString(restoreCommand)); + final ResponseException error = expectThrows(ResponseException.class, () -> client().performRequest(restoreRequest)); + assertThat(error.getMessage(), containsString("cannot disable setting [index.soft_deletes.enabled] on restore")); + } + } + + /** + * In 7.14 the 
cluster.remote.*.transport.compress setting was changed from a boolean to an enum setting + with true/false as options. This test ensures that the old boolean setting in cluster state is + translated properly. This test can be removed in 9.0. + */ + public void testTransportCompressionSetting() throws IOException { + assumeTrue("the old transport.compress setting existed before 7.14", getOldClusterVersion().before(Version.V_7_14_0)); + assumeTrue( + "Early versions of 6.x do not have cluster.remote* prefixed settings", + getOldClusterVersion().onOrAfter(Version.V_7_14_0.minimumCompatibilityVersion()) + ); + if (isRunningAgainstOldCluster()) { + final Request putSettingsRequest = new Request("PUT", "/_cluster/settings"); + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.startObject("persistent"); + { + builder.field("cluster.remote.foo.seeds", Collections.singletonList("localhost:9200")); + builder.field("cluster.remote.foo.transport.compress", "true"); + } + builder.endObject(); + } + builder.endObject(); + putSettingsRequest.setJsonEntity(Strings.toString(builder)); + } + client().performRequest(putSettingsRequest); + } else { + final Request getSettingsRequest = new Request("GET", "/_cluster/settings"); + final Response getSettingsResponse = client().performRequest(getSettingsRequest); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, getSettingsResponse.getEntity().getContent())) { + final Settings settings = RestClusterGetSettingsResponse.fromXContent(parser).getPersistentSettings(); + assertThat(REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace("foo").get(settings), equalTo(Compression.Enabled.TRUE)); + } + } + } + + public static void assertNumHits(String index, int numHits, int totalShards) throws IOException { + Map resp = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))); + assertNoFailures(resp); + assertThat(XContentMapValues.extractValue("_shards.total", resp), equalTo(totalShards)); + assertThat(XContentMapValues.extractValue("_shards.successful", resp), equalTo(totalShards)); + assertThat(extractTotalHits(resp), equalTo(numHits)); + } +} diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java new file mode 100644 index 000000000000..232619ee93bb --- /dev/null +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartTestOrdering.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
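As context for testTransportCompressionSetting above: the old cluster persists the boolean string "true", and after the upgrade the setting is expected to read back as the enum value Compression.Enabled.TRUE. The following is a minimal, hypothetical sketch of that kind of boolean-to-enum translation; the class, enum, and method names are illustrative only and are not the actual Elasticsearch implementation.

```java
import java.util.Locale;

// Hypothetical sketch only; not the Elasticsearch implementation.
// Illustrates how a pre-7.14 boolean value for the compress setting can be
// mapped onto the newer enum when old cluster state is read back.
public class CompressSettingTranslationSketch {

    enum Enabled { TRUE, INDEXING_DATA, FALSE }

    static Enabled parse(String raw) {
        return switch (raw.toLowerCase(Locale.ROOT)) {
            case "true" -> Enabled.TRUE;               // legacy boolean spelling
            case "false" -> Enabled.FALSE;             // legacy boolean spelling
            case "indexing_data" -> Enabled.INDEXING_DATA;
            default -> throw new IllegalArgumentException("unknown compress value [" + raw + "]");
        };
    }

    public static void main(String[] args) {
        // The old cluster stored "true"; after the upgrade the setting should read as TRUE.
        System.out.println(parse("true")); // prints TRUE
    }
}
```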
+ */ + +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.TestMethodAndParams; + +import java.util.Comparator; + +public class FullClusterRestartTestOrdering implements Comparator { + @Override + public int compare(TestMethodAndParams o1, TestMethodAndParams o2) { + return Integer.compare(getOrdinal(o1), getOrdinal(o2)); + } + + private int getOrdinal(TestMethodAndParams t) { + return ((FullClusterRestartUpgradeStatus) t.getInstanceArguments().get(0)).ordinal(); + } +} diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartUpgradeStatus.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartUpgradeStatus.java new file mode 100644 index 000000000000..06048d020e2a --- /dev/null +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartUpgradeStatus.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.upgrades; + +public enum FullClusterRestartUpgradeStatus { + OLD, + UPGRADED +} diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java new file mode 100644 index 000000000000..a064c8774380 --- /dev/null +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java @@ -0,0 +1,100 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
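To make the intent of FullClusterRestartTestOrdering and FullClusterRestartUpgradeStatus above concrete, here is a small self-contained sketch (the names other than the ordering rule are illustrative) showing how sorting parameterized test instances by the ordinal of their upgrade-status parameter runs every OLD-cluster method before any UPGRADED one:

```java
import java.util.Comparator;
import java.util.List;

// Standalone illustration of the ordering rule used above: lower enum ordinal sorts first,
// so instances parameterized with OLD run before instances parameterized with UPGRADED.
public class UpgradeOrderingSketch {

    enum UpgradeStatus { OLD, UPGRADED }

    record TestInstance(String method, UpgradeStatus status) {}

    public static void main(String[] args) {
        List<TestInstance> instances = List.of(
            new TestInstance("testSearch", UpgradeStatus.UPGRADED),
            new TestInstance("testSearch", UpgradeStatus.OLD),
            new TestInstance("testShrink", UpgradeStatus.UPGRADED),
            new TestInstance("testShrink", UpgradeStatus.OLD)
        );

        List<TestInstance> ordered = instances.stream()
            .sorted(Comparator.comparingInt((TestInstance t) -> t.status().ordinal()))
            .toList();

        // Prints both OLD instances first, then the UPGRADED ones.
        ordered.forEach(System.out::println);
    }
}
```

This mirrors the comparator above, which orders test instances by the ordinal of their FullClusterRestartUpgradeStatus argument.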
+ */ + +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.util.Version; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.AfterClass; +import org.junit.Before; + +import java.util.Arrays; +import java.util.Locale; + +import static org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus.OLD; +import static org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus.UPGRADED; + +@TestCaseOrdering(FullClusterRestartTestOrdering.class) +public abstract class ParameterizedFullClusterRestartTestCase extends ESRestTestCase { + private static final Version MINIMUM_WIRE_COMPATIBLE_VERSION = Version.fromString("7.17.0"); + private static final Version OLD_CLUSTER_VERSION = Version.fromString(System.getProperty("tests.old_cluster_version")); + private static boolean upgradeFailed = false; + private static boolean upgraded = false; + private final FullClusterRestartUpgradeStatus requestedUpgradeStatus; + + public ParameterizedFullClusterRestartTestCase(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { + this.requestedUpgradeStatus = upgradeStatus; + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return Arrays.stream(FullClusterRestartUpgradeStatus.values()).map(v -> new Object[] { v }).toList(); + } + + @Before + public void maybeUpgrade() throws Exception { + if (upgraded == false && requestedUpgradeStatus == UPGRADED) { + try { + if (OLD_CLUSTER_VERSION.before(MINIMUM_WIRE_COMPATIBLE_VERSION)) { + // First upgrade to latest wire compatible version + getUpgradeCluster().upgradeToVersion(MINIMUM_WIRE_COMPATIBLE_VERSION); + } + getUpgradeCluster().upgradeToVersion(Version.CURRENT); + closeClients(); + initClient(); + } catch (Exception e) { + upgradeFailed = true; + throw e; + } finally { + upgraded = true; + } + } + + // Skip remaining tests if upgrade failed + assumeFalse("Cluster upgrade failed", upgradeFailed); + } + + @AfterClass + public static void resetUpgrade() { + upgraded = false; + upgradeFailed = false; + } + + public boolean isRunningAgainstOldCluster() { + return requestedUpgradeStatus == OLD; + } + + public static org.elasticsearch.Version getOldClusterVersion() { + return org.elasticsearch.Version.fromString(OLD_CLUSTER_VERSION.toString()); + } + + public static Version getOldClusterTestVersion() { + return Version.fromString(OLD_CLUSTER_VERSION.toString()); + } + + protected abstract ElasticsearchCluster getUpgradeCluster(); + + @Override + protected String getTestRestCluster() { + return getUpgradeCluster().getHttpAddresses(); + } + + @Override + protected boolean preserveClusterUponCompletion() { + return true; + } + + protected String getRootTestName() { + return getTestName().split(" ")[0].toLowerCase(Locale.ROOT); + } +} diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java new file mode 100644 index 000000000000..1636644409fc --- /dev/null +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java @@ -0,0 +1,256 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.annotations.Name; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.InputStreamStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.ConstantScoreQueryBuilder; +import org.elasticsearch.index.query.DisMaxQueryBuilder; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.MatchNoneQueryBuilder; +import org.elasticsearch.index.query.MatchPhraseQueryBuilder; +import org.elasticsearch.index.query.MatchQueryBuilder; +import org.elasticsearch.index.query.Operator; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.index.query.SpanNearQueryBuilder; +import org.elasticsearch.index.query.SpanTermQueryBuilder; +import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; +import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.xcontent.XContentBuilder; +import org.junit.ClassRule; + +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Base64; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; + +/** + * An integration test that tests whether percolator queries stored in older supported ES version can still be read by the + * current ES version. Percolator queries are stored in the binary format in a dedicated doc values field (see + * PercolatorFieldMapper#createQueryBuilderField(...) method). Using the query builders writable contract. This test + * does best effort verifying that we don't break bwc for query builders between the first previous major version and + * the latest current major release. + * + * The queries to test are specified in json format, which turns out to work because we tend break here rarely. If the + * json format of a query being tested here then feel free to change this. 
+ */ +public class QueryBuilderBWCIT extends ParameterizedFullClusterRestartTestCase { + private static final List CANDIDATES = new ArrayList<>(); + + protected static LocalClusterConfigProvider clusterConfig = c -> {}; + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .version(getOldClusterTestVersion()) + .nodes(2) + .setting("xpack.security.enabled", "false") + .apply(() -> clusterConfig) + .build(); + + @Override + protected ElasticsearchCluster getUpgradeCluster() { + return cluster; + } + + public QueryBuilderBWCIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) { + super(upgradeStatus); + } + + static { + addCandidate(""" + "match": { "text_field": "value"} + """, new MatchQueryBuilder("text_field", "value")); + addCandidate(""" + "match": { "text_field": {"query": "value", "operator": "and"} } + """, new MatchQueryBuilder("text_field", "value").operator(Operator.AND)); + addCandidate(""" + "match": { "text_field": {"query": "value", "analyzer": "english"} } + """, new MatchQueryBuilder("text_field", "value").analyzer("english")); + addCandidate(""" + "match": { "text_field": {"query": "value", "minimum_should_match": 3} } + """, new MatchQueryBuilder("text_field", "value").minimumShouldMatch("3")); + addCandidate(""" + "match": { "text_field": {"query": "value", "fuzziness": "auto"} } + """, new MatchQueryBuilder("text_field", "value").fuzziness(Fuzziness.AUTO)); + addCandidate(""" + "match_phrase": { "text_field": "value"} + """, new MatchPhraseQueryBuilder("text_field", "value")); + addCandidate(""" + "match_phrase": { "text_field": {"query": "value", "slop": 3}} + """, new MatchPhraseQueryBuilder("text_field", "value").slop(3)); + addCandidate(""" + "range": { "long_field": {"gte": 1, "lte": 9}} + """, new RangeQueryBuilder("long_field").from(1).to(9)); + addCandidate( + """ + "bool": { "must_not": [{"match_none": {}}], "must": [{"match_all": {}}], "filter": [{"match_all": {}}], \ + "should": [{"match_all": {}}]} + """, + new BoolQueryBuilder().mustNot(new MatchNoneQueryBuilder()) + .must(new MatchAllQueryBuilder()) + .filter(new MatchAllQueryBuilder()) + .should(new MatchAllQueryBuilder()) + ); + addCandidate( + """ + "dis_max": {"queries": [{"match_all": {}},{"match_all": {}},{"match_all": {}}], "tie_breaker": 0.01} + """, + new DisMaxQueryBuilder().add(new MatchAllQueryBuilder()) + .add(new MatchAllQueryBuilder()) + .add(new MatchAllQueryBuilder()) + .tieBreaker(0.01f) + ); + addCandidate(""" + "constant_score": {"filter": {"match_all": {}}, "boost": 0.1} + """, new ConstantScoreQueryBuilder(new MatchAllQueryBuilder()).boost(0.1f)); + addCandidate( + """ + "function_score": {"query": {"match_all": {}},"functions": [{"random_score": {}, "filter": {"match_all": {}}, \ + "weight": 0.2}]} + """, + new FunctionScoreQueryBuilder( + new MatchAllQueryBuilder(), + new FunctionScoreQueryBuilder.FilterFunctionBuilder[] { + new FunctionScoreQueryBuilder.FilterFunctionBuilder( + new MatchAllQueryBuilder(), + new RandomScoreFunctionBuilder().setWeight(0.2f) + ) } + ) + ); + addCandidate( + """ + "span_near": {"clauses": [{ "span_term": { "keyword_field": "value1" }}, \ + { "span_term": { "keyword_field": "value2" }}]} + """, + new SpanNearQueryBuilder(new SpanTermQueryBuilder("keyword_field", "value1"), 0).addClause( + new SpanTermQueryBuilder("keyword_field", "value2") + ) + ); + addCandidate( + """ + "span_near": {"clauses": [{ "span_term": { "keyword_field": "value1" }}, \ + { 
"span_term": { "keyword_field": "value2" }}], "slop": 2} + """, + new SpanNearQueryBuilder(new SpanTermQueryBuilder("keyword_field", "value1"), 2).addClause( + new SpanTermQueryBuilder("keyword_field", "value2") + ) + ); + addCandidate( + """ + "span_near": {"clauses": [{ "span_term": { "keyword_field": "value1" }}, \ + { "span_term": { "keyword_field": "value2" }}], "slop": 2, "in_order": false} + """, + new SpanNearQueryBuilder(new SpanTermQueryBuilder("keyword_field", "value1"), 2).addClause( + new SpanTermQueryBuilder("keyword_field", "value2") + ).inOrder(false) + ); + } + + private static void addCandidate(String querySource, QueryBuilder expectedQb) { + CANDIDATES.add(new Object[] { "{\"query\": {" + querySource + "}}", expectedQb }); + } + + public void testQueryBuilderBWC() throws Exception { + String index = "queries"; + if (isRunningAgainstOldCluster()) { + XContentBuilder mappingsAndSettings = jsonBuilder(); + mappingsAndSettings.startObject(); + { + mappingsAndSettings.startObject("settings"); + mappingsAndSettings.field("number_of_shards", 1); + mappingsAndSettings.field("number_of_replicas", 0); + mappingsAndSettings.endObject(); + } + { + mappingsAndSettings.startObject("mappings"); + mappingsAndSettings.startObject("properties"); + { + mappingsAndSettings.startObject("query"); + mappingsAndSettings.field("type", "percolator"); + mappingsAndSettings.endObject(); + } + { + mappingsAndSettings.startObject("keyword_field"); + mappingsAndSettings.field("type", "keyword"); + mappingsAndSettings.endObject(); + } + { + mappingsAndSettings.startObject("text_field"); + mappingsAndSettings.field("type", "text"); + mappingsAndSettings.endObject(); + } + { + mappingsAndSettings.startObject("long_field"); + mappingsAndSettings.field("type", "long"); + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + Request request = new Request("PUT", "/" + index); + request.setJsonEntity(Strings.toString(mappingsAndSettings)); + Response rsp = client().performRequest(request); + assertEquals(200, rsp.getStatusLine().getStatusCode()); + + for (int i = 0; i < CANDIDATES.size(); i++) { + request = new Request("PUT", "/" + index + "/_doc/" + Integer.toString(i)); + request.setJsonEntity((String) CANDIDATES.get(i)[0]); + rsp = client().performRequest(request); + assertEquals(201, rsp.getStatusLine().getStatusCode()); + } + } else { + NamedWriteableRegistry registry = new NamedWriteableRegistry( + new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedWriteables() + ); + + for (int i = 0; i < CANDIDATES.size(); i++) { + QueryBuilder expectedQueryBuilder = (QueryBuilder) CANDIDATES.get(i)[1]; + Request request = new Request("GET", "/" + index + "/_search"); + request.setJsonEntity(Strings.format(""" + {"query": {"ids": {"values": ["%s"]}}, "docvalue_fields": [{"field":"query.query_builder_field"}]} + """, i)); + Response rsp = client().performRequest(request); + assertEquals(200, rsp.getStatusLine().getStatusCode()); + var hitRsp = (Map) ((List) ((Map) responseAsMap(rsp).get("hits")).get("hits")).get(0); + String queryBuilderStr = (String) ((List) ((Map) hitRsp.get("fields")).get("query.query_builder_field")).get(0); + byte[] qbSource = Base64.getDecoder().decode(queryBuilderStr); + try (InputStream in = new ByteArrayInputStream(qbSource, 0, qbSource.length)) { + try (StreamInput input = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(in), registry)) { + 
input.setVersion(getOldClusterVersion()); + QueryBuilder queryBuilder = input.readNamedWriteable(QueryBuilder.class); + assert in.read() == -1; + assertEquals(expectedQueryBuilder, queryBuilder); + } + } + } + } + } +} diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java deleted file mode 100644 index 7e8b88131814..000000000000 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ /dev/null @@ -1,1865 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.upgrades; - -import org.apache.http.util.EntityUtils; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.cluster.settings.RestClusterGetSettingsResponse; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; -import org.elasticsearch.client.RestClient; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.MetadataIndexStateService; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.time.DateUtils; -import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.core.Booleans; -import org.elasticsearch.core.CheckedFunction; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.mapper.DateFieldMapper; -import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; -import org.elasticsearch.test.NotEqualMessageBuilder; -import org.elasticsearch.test.XContentTestUtils; -import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.test.rest.ObjectPath; -import org.elasticsearch.transport.Compression; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.json.JsonXContent; -import org.junit.Before; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Base64; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import java.util.stream.IntStream; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonList; -import static java.util.Collections.singletonMap; -import static java.util.stream.Collectors.toList; -import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_VERSION; -import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; -import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; -import static org.elasticsearch.test.MapMatcher.assertMap; -import static org.elasticsearch.test.MapMatcher.matchesMap; -import static 
org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_COMPRESS; -import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.hasKey; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; - -/** - * Tests to run before and after a full cluster restart. This is run twice, - * one with {@code tests.is_old_cluster} set to {@code true} against a cluster - * of an older version. The cluster is shutdown and a cluster of the new - * version is started with the same data directories and then this is rerun - * with {@code tests.is_old_cluster} set to {@code false}. - */ -public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { - - private String index; - - @Before - public void setIndex() { - index = getTestName().toLowerCase(Locale.ROOT); - } - - public void testSearch() throws Exception { - int count; - if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { - mappingsAndSettings.startObject("settings"); - mappingsAndSettings.field("number_of_shards", 1); - mappingsAndSettings.field("number_of_replicas", 0); - mappingsAndSettings.endObject(); - } - { - mappingsAndSettings.startObject("mappings"); - mappingsAndSettings.startObject("properties"); - { - mappingsAndSettings.startObject("string"); - mappingsAndSettings.field("type", "text"); - mappingsAndSettings.endObject(); - } - { - mappingsAndSettings.startObject("dots_in_field_names"); - mappingsAndSettings.field("type", "text"); - mappingsAndSettings.endObject(); - } - { - mappingsAndSettings.startObject("binary"); - mappingsAndSettings.field("type", "binary"); - mappingsAndSettings.field("store", "true"); - mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - client().performRequest(createIndex); - - count = randomIntBetween(2000, 3000); - byte[] randomByteArray = new byte[16]; - random().nextBytes(randomByteArray); - indexRandomDocuments( - count, - true, - true, - randomBoolean(), - i -> JsonXContent.contentBuilder() - .startObject() - .field("string", randomAlphaOfLength(10)) - .field("int", randomInt(100)) - .field("float", randomFloat()) - // be sure to create a "proper" boolean (True, False) for the first document so that automapping is correct - .field("bool", i > 0 && randomBoolean()) - .field("field.with.dots", randomAlphaOfLength(10)) - .field("binary", Base64.getEncoder().encodeToString(randomByteArray)) - .endObject() - ); - refreshAllIndices(); - } else { - count = countOfIndexedRandomDocuments(); - } - - ensureGreenLongWait(index); - assertBasicSearchWorks(count); - assertAllSearchWorks(count); - assertBasicAggregationWorks(); - assertRealtimeGetWorks(); - assertStoredBinaryFields(count); - } - - public void testNewReplicas() throws Exception { - if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { - mappingsAndSettings.startObject("settings"); - 
mappingsAndSettings.field("number_of_shards", 1); - mappingsAndSettings.field("number_of_replicas", 0); - mappingsAndSettings.endObject(); - } - { - mappingsAndSettings.startObject("mappings"); - mappingsAndSettings.startObject("properties"); - { - mappingsAndSettings.startObject("field"); - mappingsAndSettings.field("type", "text"); - mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - client().performRequest(createIndex); - - int numDocs = randomIntBetween(2000, 3000); - indexRandomDocuments( - numDocs, - true, - false, - randomBoolean(), - i -> JsonXContent.contentBuilder().startObject().field("field", "value").endObject() - ); - logger.info("Refreshing [{}]", index); - client().performRequest(new Request("POST", "/" + index + "/_refresh")); - } else { - // The test runs with two nodes so this should still go green. - final int numReplicas = 1; - final long startTime = System.currentTimeMillis(); - logger.debug("--> creating [{}] replicas for index [{}]", numReplicas, index); - Request setNumberOfReplicas = new Request("PUT", "/" + index + "/_settings"); - setNumberOfReplicas.setJsonEntity("{ \"index\": { \"number_of_replicas\" : " + numReplicas + " }}"); - client().performRequest(setNumberOfReplicas); - - ensureGreenLongWait(index); - - logger.debug("--> index [{}] is green, took [{}] ms", index, (System.currentTimeMillis() - startTime)); - Map recoverRsp = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_recovery"))); - logger.debug("--> recovery status:\n{}", recoverRsp); - - Set counts = new HashSet<>(); - for (String node : dataNodes(index, client())) { - Request search = new Request("GET", "/" + index + "/_search"); - search.addParameter("preference", "_only_nodes:" + node); - Map responseBody = entityAsMap(client().performRequest(search)); - assertNoFailures(responseBody); - int hits = extractTotalHits(responseBody); - counts.add(hits); - } - assertEquals("All nodes should have a consistent number of documents", 1, counts.size()); - } - } - - public void testSearchTimeSeriesMode() throws Exception { - assumeTrue("indexing time series indices changed in 8.2.0", getOldClusterVersion().onOrAfter(Version.V_8_2_0)); - int numDocs; - if (isRunningAgainstOldCluster()) { - numDocs = createTimeSeriesModeIndex(1); - } else { - numDocs = countOfIndexedRandomDocuments(); - } - assertCountAll(numDocs); - Request request = new Request("GET", "/" + index + "/_search"); - XContentBuilder body = jsonBuilder().startObject(); - body.field("size", 0); - body.startObject("aggs").startObject("check").startObject("scripted_metric"); - { - body.field("init_script", "state.timeSeries = new HashSet()"); - body.field("map_script", "state.timeSeries.add(doc['dim'].value)"); - body.field("combine_script", "return state.timeSeries"); - StringBuilder reduceScript = new StringBuilder(); - reduceScript.append("Set timeSeries = new TreeSet();"); - reduceScript.append("for (s in states) {"); - reduceScript.append(" for (ts in s) {"); - reduceScript.append(" boolean newTs = timeSeries.add(ts);"); - reduceScript.append(" if (false == newTs) {"); - reduceScript.append(" throw new IllegalArgumentException(ts + ' appeared in two shards');"); - reduceScript.append(" }"); - reduceScript.append(" }"); - reduceScript.append("}"); - reduceScript.append("return timeSeries;"); - 
body.field("reduce_script", reduceScript.toString()); - } - body.endObject().endObject().endObject(); - body.endObject(); - request.setJsonEntity(Strings.toString(body)); - Map response = entityAsMap(client().performRequest(request)); - assertMap( - response, - matchesMap().extraOk() - .entry("hits", matchesMap().extraOk().entry("total", Map.of("value", numDocs, "relation", "eq"))) - .entry("aggregations", Map.of("check", Map.of("value", IntStream.range(0, 10).mapToObj(i -> "dim" + i).collect(toList())))) - ); - } - - public void testNewReplicasTimeSeriesMode() throws Exception { - assumeTrue("indexing time series indices changed in 8.2.0", getOldClusterVersion().onOrAfter(Version.V_8_2_0)); - if (isRunningAgainstOldCluster()) { - createTimeSeriesModeIndex(0); - } else { - // The test runs with two nodes so this should still go green. - final int numReplicas = 1; - final long startTime = System.currentTimeMillis(); - logger.debug("--> creating [{}] replicas for index [{}]", numReplicas, index); - Request setNumberOfReplicas = new Request("PUT", "/" + index + "/_settings"); - setNumberOfReplicas.setJsonEntity("{ \"index\": { \"number_of_replicas\" : " + numReplicas + " }}"); - client().performRequest(setNumberOfReplicas); - - ensureGreenLongWait(index); - - logger.debug("--> index [{}] is green, took [{}] ms", index, (System.currentTimeMillis() - startTime)); - Map recoverRsp = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_recovery"))); - logger.debug("--> recovery status:\n{}", recoverRsp); - - Set counts = new HashSet<>(); - for (String node : dataNodes(index, client())) { - Request search = new Request("GET", "/" + index + "/_search"); - search.addParameter("preference", "_only_nodes:" + node); - Map responseBody = entityAsMap(client().performRequest(search)); - assertNoFailures(responseBody); - int hits = extractTotalHits(responseBody); - counts.add(hits); - } - assertEquals("All nodes should have a consistent number of documents", 1, counts.size()); - } - } - - private int createTimeSeriesModeIndex(int replicas) throws IOException { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { - mappingsAndSettings.startObject("settings"); - mappingsAndSettings.field("number_of_shards", 1); - mappingsAndSettings.field("number_of_replicas", replicas); - mappingsAndSettings.field("mode", "time_series"); - mappingsAndSettings.field("routing_path", "dim"); - mappingsAndSettings.field("time_series.start_time", 1L); - mappingsAndSettings.field("time_series.end_time", DateUtils.MAX_MILLIS_BEFORE_9999 - 1); - mappingsAndSettings.endObject(); - } - { - mappingsAndSettings.startObject("mappings"); - mappingsAndSettings.startObject("properties"); - { - mappingsAndSettings.startObject("@timestamp").field("type", "date").endObject(); - mappingsAndSettings.startObject("dim").field("type", "keyword").field("time_series_dimension", true).endObject(); - } - mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - client().performRequest(createIndex); - - int numDocs = randomIntBetween(2000, 3000); - long basetime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2021-01-01T00:00:00Z"); - indexRandomDocuments( - numDocs, - true, - true, - false, - i -> JsonXContent.contentBuilder() - .startObject() - .field("@timestamp", basetime + 
TimeUnit.MINUTES.toMillis(i)) - .field("dim", "dim" + (i % 10)) - .endObject() - ); - logger.info("Refreshing [{}]", index); - client().performRequest(new Request("POST", "/" + index + "/_refresh")); - return numDocs; - } - - public void testClusterState() throws Exception { - if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - mappingsAndSettings.field("index_patterns", index); - mappingsAndSettings.field("order", "1000"); - { - mappingsAndSettings.startObject("settings"); - mappingsAndSettings.field("number_of_shards", 1); - mappingsAndSettings.field("number_of_replicas", 0); - mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - Request createTemplate = new Request("PUT", "/_template/template_1"); - createTemplate.setJsonEntity(Strings.toString(mappingsAndSettings)); - createTemplate.setOptions(expectWarnings(RestPutIndexTemplateAction.DEPRECATION_WARNING)); - client().performRequest(createTemplate); - client().performRequest(new Request("PUT", "/" + index)); - } - - // verifying if we can still read some properties from cluster state api: - Map clusterState = entityAsMap(client().performRequest(new Request("GET", "/_cluster/state"))); - - // Check some global properties: - String numberOfShards = (String) XContentMapValues.extractValue( - "metadata.templates.template_1.settings.index.number_of_shards", - clusterState - ); - assertEquals("1", numberOfShards); - String numberOfReplicas = (String) XContentMapValues.extractValue( - "metadata.templates.template_1.settings.index.number_of_replicas", - clusterState - ); - assertEquals("0", numberOfReplicas); - - // Check some index properties: - numberOfShards = (String) XContentMapValues.extractValue( - "metadata.indices." + index + ".settings.index.number_of_shards", - clusterState - ); - assertEquals("1", numberOfShards); - numberOfReplicas = (String) XContentMapValues.extractValue( - "metadata.indices." + index + ".settings.index.number_of_replicas", - clusterState - ); - assertEquals("0", numberOfReplicas); - Version version = Version.fromId( - Integer.valueOf( - (String) XContentMapValues.extractValue("metadata.indices." 
+ index + ".settings.index.version.created", clusterState) - ) - ); - assertEquals(getOldClusterVersion(), version); - - } - - public void testShrink() throws IOException { - String shrunkenIndex = index + "_shrunk"; - int numDocs; - if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { - mappingsAndSettings.startObject("mappings"); - { - mappingsAndSettings.startObject("properties"); - { - mappingsAndSettings.startObject("field"); - { - mappingsAndSettings.field("type", "text"); - } - mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - - mappingsAndSettings.startObject("settings"); - { - mappingsAndSettings.field("index.number_of_shards", 5); - } - mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - client().performRequest(createIndex); - - numDocs = randomIntBetween(512, 1024); - indexRandomDocuments( - numDocs, - true, - true, - randomBoolean(), - i -> JsonXContent.contentBuilder().startObject().field("field", "value").endObject() - ); - - ensureGreen(index); // wait for source index to be available on both nodes before starting shrink - - Request updateSettingsRequest = new Request("PUT", "/" + index + "/_settings"); - updateSettingsRequest.setJsonEntity("{\"settings\": {\"index.blocks.write\": true}}"); - client().performRequest(updateSettingsRequest); - - Request shrinkIndexRequest = new Request("PUT", "/" + index + "/_shrink/" + shrunkenIndex); - - shrinkIndexRequest.setJsonEntity("{\"settings\": {\"index.number_of_shards\": 1}}"); - client().performRequest(shrinkIndexRequest); - - refreshAllIndices(); - } else { - numDocs = countOfIndexedRandomDocuments(); - } - - Map response = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))); - assertNoFailures(response); - int totalShards = (int) XContentMapValues.extractValue("_shards.total", response); - assertThat(totalShards, greaterThan(1)); - int successfulShards = (int) XContentMapValues.extractValue("_shards.successful", response); - assertEquals(totalShards, successfulShards); - int totalHits = extractTotalHits(response); - assertEquals(numDocs, totalHits); - - response = entityAsMap(client().performRequest(new Request("GET", "/" + shrunkenIndex + "/_search"))); - assertNoFailures(response); - totalShards = (int) XContentMapValues.extractValue("_shards.total", response); - assertEquals(1, totalShards); - successfulShards = (int) XContentMapValues.extractValue("_shards.successful", response); - assertEquals(1, successfulShards); - totalHits = extractTotalHits(response); - assertEquals(numDocs, totalHits); - } - - public void testShrinkAfterUpgrade() throws IOException { - String shrunkenIndex = index + "_shrunk"; - int numDocs; - if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { - mappingsAndSettings.startObject("mappings"); - { - mappingsAndSettings.startObject("properties"); - { - mappingsAndSettings.startObject("field"); - { - mappingsAndSettings.field("type", "text"); - } - mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - // the default number of shards is now one so we have to set the number of shards to be more than one explicitly - 
mappingsAndSettings.startObject("settings"); - mappingsAndSettings.field("index.number_of_shards", 5); - mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - client().performRequest(createIndex); - - numDocs = randomIntBetween(512, 1024); - indexRandomDocuments( - numDocs, - true, - true, - randomBoolean(), - i -> JsonXContent.contentBuilder().startObject().field("field", "value").endObject() - ); - } else { - ensureGreen(index); // wait for source index to be available on both nodes before starting shrink - - Request updateSettingsRequest = new Request("PUT", "/" + index + "/_settings"); - updateSettingsRequest.setJsonEntity("{\"settings\": {\"index.blocks.write\": true}}"); - client().performRequest(updateSettingsRequest); - - Request shrinkIndexRequest = new Request("PUT", "/" + index + "/_shrink/" + shrunkenIndex); - shrinkIndexRequest.setJsonEntity("{\"settings\": {\"index.number_of_shards\": 1}}"); - client().performRequest(shrinkIndexRequest); - - numDocs = countOfIndexedRandomDocuments(); - } - - refreshAllIndices(); - - Map response = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))); - assertNoFailures(response); - int totalShards = (int) XContentMapValues.extractValue("_shards.total", response); - assertThat(totalShards, greaterThan(1)); - int successfulShards = (int) XContentMapValues.extractValue("_shards.successful", response); - assertEquals(totalShards, successfulShards); - int totalHits = extractTotalHits(response); - assertEquals(numDocs, totalHits); - - if (isRunningAgainstOldCluster() == false) { - response = entityAsMap(client().performRequest(new Request("GET", "/" + shrunkenIndex + "/_search"))); - assertNoFailures(response); - totalShards = (int) XContentMapValues.extractValue("_shards.total", response); - assertEquals(1, totalShards); - successfulShards = (int) XContentMapValues.extractValue("_shards.successful", response); - assertEquals(1, successfulShards); - totalHits = extractTotalHits(response); - assertEquals(numDocs, totalHits); - } - } - - /** - * Test upgrading after a rollover. Specifically: - *
    - *
  1. Create an index with a write alias - *
  2. Write some documents to the write alias - *
  3. Roll over the index - *
  4. Make sure the document count is correct - *
  5. Upgrade - *
  6. Write some more documents to the write alias - *
  7. Make sure the document count is correct - *
- */ - public void testRollover() throws IOException { - if (isRunningAgainstOldCluster()) { - Request createIndex = new Request("PUT", "/" + index + "-000001"); - createIndex.setJsonEntity(formatted(""" - { - "aliases": { - "%s_write": {} - } - }""", index)); - client().performRequest(createIndex); - } - - int bulkCount = 10; - String bulk = """ - {"index":{}} - {"test":"test"} - """.repeat(bulkCount); - - Request bulkRequest = new Request("POST", "/" + index + "_write/_bulk"); - - bulkRequest.setJsonEntity(bulk); - bulkRequest.addParameter("refresh", ""); - assertThat(EntityUtils.toString(client().performRequest(bulkRequest).getEntity()), containsString("\"errors\":false")); - - if (isRunningAgainstOldCluster()) { - Request rolloverRequest = new Request("POST", "/" + index + "_write/_rollover"); - rolloverRequest.setJsonEntity(""" - { "conditions": { "max_docs": 5 }}"""); - client().performRequest(rolloverRequest); - - assertThat( - EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices?v")).getEntity()), - containsString("testrollover-000002") - ); - } - - Request countRequest = new Request("POST", "/" + index + "-*/_search"); - countRequest.addParameter("size", "0"); - Map count = entityAsMap(client().performRequest(countRequest)); - assertNoFailures(count); - - int expectedCount = bulkCount + (isRunningAgainstOldCluster() ? 0 : bulkCount); - assertEquals(expectedCount, extractTotalHits(count)); - } - - void assertCountAll(int count) throws IOException { - Map response = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))); - assertNoFailures(response); - int numDocs = extractTotalHits(response); - logger.info("Found {} in old index", numDocs); - assertEquals(count, numDocs); - } - - void assertBasicSearchWorks(int count) throws IOException { - logger.info("--> testing basic search"); - { - assertCountAll(count); - } - - logger.info("--> testing basic search with sort"); - { - Request searchRequest = new Request("GET", "/" + index + "/_search"); - searchRequest.setJsonEntity(""" - { "sort": [{ "int" : "asc" }]}"""); - Map response = entityAsMap(client().performRequest(searchRequest)); - assertNoFailures(response); - assertTotalHits(count, response); - } - - logger.info("--> testing exists filter"); - { - Request searchRequest = new Request("GET", "/" + index + "/_search"); - searchRequest.setJsonEntity(""" - { "query": { "exists" : {"field": "string"} }}"""); - Map response = entityAsMap(client().performRequest(searchRequest)); - assertNoFailures(response); - assertTotalHits(count, response); - } - - logger.info("--> testing field with dots in the name"); - { - Request searchRequest = new Request("GET", "/" + index + "/_search"); - searchRequest.setJsonEntity(""" - { "query": { "exists" : {"field": "field.with.dots"} }}"""); - Map response = entityAsMap(client().performRequest(searchRequest)); - assertNoFailures(response); - assertTotalHits(count, response); - } - } - - void assertAllSearchWorks(int count) throws IOException { - logger.info("--> testing _all search"); - Map response = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))); - assertNoFailures(response); - assertTotalHits(count, response); - Map bestHit = (Map) ((List) (XContentMapValues.extractValue("hits.hits", response))).get(0); - - // Make sure there are payloads and they are taken into account for the score - // the 'string' field has a boost of 4 in the mappings so it should get a payload boost - String stringValue = (String) 
XContentMapValues.extractValue("_source.string", bestHit); - assertNotNull(stringValue); - String id = (String) bestHit.get("_id"); - - Request explainRequest = new Request("GET", "/" + index + "/_explain/" + id); - explainRequest.setJsonEntity("{ \"query\": { \"match_all\" : {} }}"); - String explanation = toStr(client().performRequest(explainRequest)); - assertFalse("Could not find payload boost in explanation\n" + explanation, explanation.contains("payloadBoost")); - - // Make sure the query can run on the whole index - Request searchRequest = new Request("GET", "/" + index + "/_search"); - searchRequest.setEntity(explainRequest.getEntity()); - searchRequest.addParameter("explain", "true"); - Map matchAllResponse = entityAsMap(client().performRequest(searchRequest)); - assertNoFailures(matchAllResponse); - assertTotalHits(count, matchAllResponse); - } - - void assertBasicAggregationWorks() throws IOException { - // histogram on a long - Request longHistogramRequest = new Request("GET", "/" + index + "/_search"); - longHistogramRequest.setJsonEntity(""" - { - "aggs": { - "histo": { - "histogram": { - "field": "int", - "interval": 10 - } - } - } - }"""); - Map longHistogram = entityAsMap(client().performRequest(longHistogramRequest)); - assertNoFailures(longHistogram); - List histoBuckets = (List) XContentMapValues.extractValue("aggregations.histo.buckets", longHistogram); - int histoCount = 0; - for (Object entry : histoBuckets) { - Map bucket = (Map) entry; - histoCount += (Integer) bucket.get("doc_count"); - } - assertTotalHits(histoCount, longHistogram); - - // terms on a boolean - Request boolTermsRequest = new Request("GET", "/" + index + "/_search"); - boolTermsRequest.setJsonEntity(""" - { - "aggs": { - "bool_terms": { - "terms": { - "field": "bool" - } - } - } - }"""); - Map boolTerms = entityAsMap(client().performRequest(boolTermsRequest)); - List termsBuckets = (List) XContentMapValues.extractValue("aggregations.bool_terms.buckets", boolTerms); - int termsCount = 0; - for (Object entry : termsBuckets) { - Map bucket = (Map) entry; - termsCount += (Integer) bucket.get("doc_count"); - } - assertTotalHits(termsCount, boolTerms); - } - - void assertRealtimeGetWorks() throws IOException { - Request disableAutoRefresh = new Request("PUT", "/" + index + "/_settings"); - disableAutoRefresh.setJsonEntity(""" - { "index": { "refresh_interval" : -1 }}"""); - client().performRequest(disableAutoRefresh); - - Request searchRequest = new Request("GET", "/" + index + "/_search"); - searchRequest.setJsonEntity(""" - { "query": { "match_all" : {} }}"""); - Map searchResponse = entityAsMap(client().performRequest(searchRequest)); - Map hit = (Map) ((List) (XContentMapValues.extractValue("hits.hits", searchResponse))).get(0); - String docId = (String) hit.get("_id"); - - Request updateRequest = new Request("POST", "/" + index + "/_update/" + docId); - updateRequest.setJsonEntity(""" - { "doc" : { "foo": "bar"}}"""); - client().performRequest(updateRequest); - - Request getRequest = new Request("GET", "/" + index + "/_doc/" + docId); - - Map getRsp = entityAsMap(client().performRequest(getRequest)); - Map source = (Map) getRsp.get("_source"); - assertTrue("doc does not contain 'foo' key: " + source, source.containsKey("foo")); - - Request enableAutoRefresh = new Request("PUT", "/" + index + "/_settings"); - enableAutoRefresh.setJsonEntity(""" - { "index": { "refresh_interval" : "1s" }}"""); - client().performRequest(enableAutoRefresh); - } - - void assertStoredBinaryFields(int count) throws 
Exception { - Request request = new Request("GET", "/" + index + "/_search"); - request.setJsonEntity(""" - { - "query": { - "match_all": {} - }, - "size": 100, - "stored_fields": "binary" - }"""); - Map rsp = entityAsMap(client().performRequest(request)); - - assertTotalHits(count, rsp); - List hits = (List) XContentMapValues.extractValue("hits.hits", rsp); - assertEquals(100, hits.size()); - for (Object hit : hits) { - Map hitRsp = (Map) hit; - List values = (List) XContentMapValues.extractValue("fields.binary", hitRsp); - assertEquals(1, values.size()); - String value = (String) values.get(0); - byte[] binaryValue = Base64.getDecoder().decode(value); - assertEquals("Unexpected string length [" + value + "]", 16, binaryValue.length); - } - } - - static String toStr(Response response) throws IOException { - return EntityUtils.toString(response.getEntity()); - } - - static void assertNoFailures(Map response) { - int failed = (int) XContentMapValues.extractValue("_shards.failed", response); - assertEquals(0, failed); - } - - void assertTotalHits(int expectedTotalHits, Map response) { - int actualTotalHits = extractTotalHits(response); - assertEquals(response.toString(), expectedTotalHits, actualTotalHits); - } - - static int extractTotalHits(Map response) { - return (Integer) XContentMapValues.extractValue("hits.total.value", response); - } - - /** - * Tests that a single document survives. Super basic smoke test. - */ - public void testSingleDoc() throws IOException { - String docLocation = "/" + index + "/_doc/1"; - String doc = "{\"test\": \"test\"}"; - - if (isRunningAgainstOldCluster()) { - Request createDoc = new Request("PUT", docLocation); - createDoc.setJsonEntity(doc); - client().performRequest(createDoc); - } - - Request request = new Request("GET", docLocation); - assertThat(toStr(client().performRequest(request)), containsString(doc)); - } - - /** - * Tests that a single empty shard index is correctly recovered. Empty shards are often an edge case. - */ - public void testEmptyShard() throws IOException { - final String indexName = "test_empty_shard"; - - if (isRunningAgainstOldCluster()) { - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) - // if the node with the replica is the first to be restarted, while a replica is still recovering - // then delayed allocation will kick in. When the node comes back, the master will search for a copy - // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN - // before timing out - .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") - .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster - createIndex(indexName, settings.build()); - } - ensureGreen(indexName); - } - - /** - * Tests recovery of an index with or without a translog and the - * statistics we gather about that. - */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/52031") - public void testRecovery() throws Exception { - int count; - boolean shouldHaveTranslog; - if (isRunningAgainstOldCluster()) { - count = between(200, 300); - /* We've had bugs in the past where we couldn't restore - * an index without a translog so we randomize whether - * or not we have one. 
*/ - shouldHaveTranslog = randomBoolean(); - Settings.Builder settings = Settings.builder(); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { - settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); - } - final String mappings = randomBoolean() ? "\"_source\": { \"enabled\": false}" : null; - createIndex(index, settings.build(), mappings); - indexRandomDocuments(count, true, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject()); - - // make sure all recoveries are done - ensureGreen(index); - - // Force flush so we're sure that all translog are committed - Request flushRequest = new Request("POST", "/" + index + "/_flush"); - flushRequest.addParameter("force", "true"); - flushRequest.addParameter("wait_if_ongoing", "true"); - assertOK(client().performRequest(flushRequest)); - - if (shouldHaveTranslog) { - // Update a few documents so we are sure to have a translog - indexRandomDocuments( - count / 10, - false, // flushing here would invalidate the whole thing - false, - true, - i -> jsonBuilder().startObject().field("field", "value").endObject() - ); - } - saveInfoDocument(index + "_should_have_translog", Boolean.toString(shouldHaveTranslog)); - } else { - count = countOfIndexedRandomDocuments(); - shouldHaveTranslog = Booleans.parseBoolean(loadInfoDocument(index + "_should_have_translog")); - } - - // Count the documents in the index to make sure we have as many as we put there - Request countRequest = new Request("GET", "/" + index + "/_search"); - countRequest.addParameter("size", "0"); - refreshAllIndices(); - Map countResponse = entityAsMap(client().performRequest(countRequest)); - assertTotalHits(count, countResponse); - - if (false == isRunningAgainstOldCluster()) { - boolean restoredFromTranslog = false; - boolean foundPrimary = false; - Request recoveryRequest = new Request("GET", "/_cat/recovery/" + index); - recoveryRequest.addParameter("h", "index,shard,type,stage,translog_ops_recovered"); - recoveryRequest.addParameter("s", "index,shard,type"); - String recoveryResponse = toStr(client().performRequest(recoveryRequest)); - for (String line : recoveryResponse.split("\n")) { - // Find the primaries - foundPrimary = true; - if (false == line.contains("done") && line.contains("existing_store")) { - continue; - } - /* Mark if we see a primary that looked like it restored from the translog. - * Not all primaries will look like this all the time because we modify - * random documents when we want there to be a translog and they might - * not be spread around all the shards. 
*/ - Matcher m = Pattern.compile("(\\d+)$").matcher(line); - assertTrue(line, m.find()); - int translogOps = Integer.parseInt(m.group(1)); - if (translogOps > 0) { - restoredFromTranslog = true; - } - } - assertTrue("expected to find a primary but didn't\n" + recoveryResponse, foundPrimary); - assertEquals("mismatch while checking for translog recovery\n" + recoveryResponse, shouldHaveTranslog, restoredFromTranslog); - - String currentLuceneVersion = Version.CURRENT.luceneVersion.toString(); - String bwcLuceneVersion = getOldClusterVersion().luceneVersion.toString(); - String minCompatibleBWCVersion = Version.CURRENT.minimumCompatibilityVersion().luceneVersion.toString(); - if (shouldHaveTranslog && false == currentLuceneVersion.equals(bwcLuceneVersion)) { - int numCurrentVersion = 0; - int numBwcVersion = 0; - Request segmentsRequest = new Request("GET", "/_cat/segments/" + index); - segmentsRequest.addParameter("h", "prirep,shard,index,version"); - segmentsRequest.addParameter("s", "prirep,shard,index"); - String segmentsResponse = toStr(client().performRequest(segmentsRequest)); - for (String line : segmentsResponse.split("\n")) { - if (false == line.startsWith("p")) { - continue; - } - Matcher m = Pattern.compile("(\\d+\\.\\d+\\.\\d+)$").matcher(line); - assertTrue(line, m.find()); - String version = m.group(1); - if (currentLuceneVersion.equals(version)) { - numCurrentVersion++; - } else if (bwcLuceneVersion.equals(version)) { - numBwcVersion++; - } else if (minCompatibleBWCVersion.equals(version) && minCompatibleBWCVersion.equals(bwcLuceneVersion) == false) { - // Our upgrade path from 7.non-last always goes through 7.last, which depending on timing can create 7.last - // index segment. We ignore those. - continue; - } else { - fail("expected version to be one of [" + currentLuceneVersion + "," + bwcLuceneVersion + "] but was " + line); - } - } - assertNotEquals( - "expected at least 1 current segment after translog recovery. segments:\n" + segmentsResponse, - 0, - numCurrentVersion - ); - assertNotEquals("expected at least 1 old segment. segments:\n" + segmentsResponse, 0, numBwcVersion); - } - } - } - - /** - * Tests snapshot/restore by creating a snapshot and restoring it. It takes - * a snapshot on the old cluster and restores it on the old cluster as a - * sanity check and on the new cluster as an upgrade test. It also takes a - * snapshot on the new cluster and restores that on the new cluster as a - * test that the repository is ok with containing snapshot from both the - * old and new versions. All of the snapshots include an index, a template, - * and some routing configuration. 
- */ - public void testSnapshotRestore() throws IOException { - int count; - if (isRunningAgainstOldCluster()) { - // Create the index - count = between(200, 300); - Settings.Builder settings = Settings.builder(); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { - settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); - } - createIndex(index, settings.build()); - indexRandomDocuments(count, true, true, randomBoolean(), i -> jsonBuilder().startObject().field("field", "value").endObject()); - } else { - count = countOfIndexedRandomDocuments(); - } - - // Refresh the index so the count doesn't fail - refreshAllIndices(); - - // Count the documents in the index to make sure we have as many as we put there - Request countRequest = new Request("GET", "/" + index + "/_search"); - countRequest.addParameter("size", "0"); - Map countResponse = entityAsMap(client().performRequest(countRequest)); - assertTotalHits(count, countResponse); - - // Stick a routing attribute into to cluster settings so we can see it after the restore - Request addRoutingSettings = new Request("PUT", "/_cluster/settings"); - addRoutingSettings.setJsonEntity(formatted(""" - {"persistent": {"cluster.routing.allocation.exclude.test_attr": "%s"}} - """, getOldClusterVersion())); - client().performRequest(addRoutingSettings); - - // Stick a template into the cluster so we can see it after the restore - XContentBuilder templateBuilder = JsonXContent.contentBuilder().startObject(); - templateBuilder.field("index_patterns", "evil_*"); // Don't confuse other tests by applying the template - templateBuilder.startObject("settings"); - { - templateBuilder.field("number_of_shards", 1); - } - templateBuilder.endObject(); - templateBuilder.startObject("mappings"); - { - { - templateBuilder.startObject("_source"); - { - templateBuilder.field("enabled", true); - } - templateBuilder.endObject(); - } - } - templateBuilder.endObject(); - templateBuilder.startObject("aliases"); - { - templateBuilder.startObject("alias1").endObject(); - templateBuilder.startObject("alias2"); - { - templateBuilder.startObject("filter"); - { - templateBuilder.startObject("term"); - { - templateBuilder.field("version", isRunningAgainstOldCluster() ? getOldClusterVersion() : Version.CURRENT); - } - templateBuilder.endObject(); - } - templateBuilder.endObject(); - } - templateBuilder.endObject(); - } - templateBuilder.endObject().endObject(); - Request createTemplateRequest = new Request("PUT", "/_template/test_template"); - createTemplateRequest.setJsonEntity(Strings.toString(templateBuilder)); - createTemplateRequest.setOptions(expectWarnings(RestPutIndexTemplateAction.DEPRECATION_WARNING)); - - client().performRequest(createTemplateRequest); - - if (isRunningAgainstOldCluster()) { - // Create the repo - XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); - { - repoConfig.field("type", "fs"); - repoConfig.startObject("settings"); - { - repoConfig.field("compress", randomBoolean()); - repoConfig.field("location", System.getProperty("tests.path.repo")); - } - repoConfig.endObject(); - } - repoConfig.endObject(); - Request createRepoRequest = new Request("PUT", "/_snapshot/repo"); - createRepoRequest.setJsonEntity(Strings.toString(repoConfig)); - client().performRequest(createRepoRequest); - } - - Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + (isRunningAgainstOldCluster() ? 
"old_snap" : "new_snap")); - createSnapshot.addParameter("wait_for_completion", "true"); - createSnapshot.setJsonEntity("{\"indices\": \"" + index + "\"}"); - client().performRequest(createSnapshot); - - checkSnapshot("old_snap", count, getOldClusterVersion()); - if (false == isRunningAgainstOldCluster()) { - checkSnapshot("new_snap", count, Version.CURRENT); - } - } - - public void testHistoryUUIDIsAdded() throws Exception { - if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { - mappingsAndSettings.startObject("settings"); - mappingsAndSettings.field("number_of_shards", 1); - mappingsAndSettings.field("number_of_replicas", 1); - mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - client().performRequest(createIndex); - } else { - ensureGreenLongWait(index); - - Request statsRequest = new Request("GET", index + "/_stats"); - statsRequest.addParameter("level", "shards"); - Response response = client().performRequest(statsRequest); - List shardStats = ObjectPath.createFromResponse(response).evaluate("indices." + index + ".shards.0"); - assertThat(shardStats, notNullValue()); - assertThat("Expected stats for 2 shards", shardStats, hasSize(2)); - String globalHistoryUUID = null; - for (Object shard : shardStats) { - final String nodeId = ObjectPath.evaluate(shard, "routing.node"); - final Boolean primary = ObjectPath.evaluate(shard, "routing.primary"); - logger.info("evaluating: {} , {}", ObjectPath.evaluate(shard, "routing"), ObjectPath.evaluate(shard, "commit")); - String historyUUID = ObjectPath.evaluate(shard, "commit.user_data.history_uuid"); - assertThat("no history uuid found on " + nodeId + " (primary: " + primary + ")", historyUUID, notNullValue()); - if (globalHistoryUUID == null) { - globalHistoryUUID = historyUUID; - } else { - assertThat( - "history uuid mismatch on " + nodeId + " (primary: " + primary + ")", - historyUUID, - equalTo(globalHistoryUUID) - ); - } - } - } - } - - public void testSoftDeletes() throws Exception { - if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { - mappingsAndSettings.startObject("settings"); - mappingsAndSettings.field("number_of_shards", 1); - mappingsAndSettings.field("number_of_replicas", 1); - if (randomBoolean()) { - mappingsAndSettings.field("soft_deletes.enabled", true); - } - mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - client().performRequest(createIndex); - int numDocs = between(10, 100); - for (int i = 0; i < numDocs; i++) { - String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v1").endObject()); - Request request = new Request("POST", "/" + index + "/_doc/" + i); - request.setJsonEntity(doc); - client().performRequest(request); - refreshAllIndices(); - } - client().performRequest(new Request("POST", "/" + index + "/_flush")); - int liveDocs = numDocs; - assertTotalHits(liveDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search")))); - for (int i = 0; i < numDocs; i++) { - if (randomBoolean()) { - String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v2").endObject()); - Request 
request = new Request("POST", "/" + index + "/_doc/" + i); - request.setJsonEntity(doc); - client().performRequest(request); - } else if (randomBoolean()) { - client().performRequest(new Request("DELETE", "/" + index + "/_doc/" + i)); - liveDocs--; - } - } - refreshAllIndices(); - assertTotalHits(liveDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search")))); - saveInfoDocument(index + "_doc_count", Integer.toString(liveDocs)); - } else { - int liveDocs = Integer.parseInt(loadInfoDocument(index + "_doc_count")); - assertTotalHits(liveDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search")))); - } - } - - /** - * This test creates an index in the old cluster and then closes it. When the cluster is fully restarted in a newer version, - * it verifies that the index exists and is replicated if the old version supports replication. - */ - public void testClosedIndices() throws Exception { - if (isRunningAgainstOldCluster()) { - createIndex(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build()); - ensureGreen(index); - - int numDocs = 0; - if (randomBoolean()) { - numDocs = between(1, 100); - for (int i = 0; i < numDocs; i++) { - final Request request = new Request("POST", "/" + index + "/_doc/" + i); - request.setJsonEntity(Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v1").endObject())); - assertOK(client().performRequest(request)); - if (rarely()) { - refreshAllIndices(); - } - } - refreshAllIndices(); - } - - assertTotalHits(numDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search")))); - saveInfoDocument(index + "_doc_count", Integer.toString(numDocs)); - closeIndex(index); - } - - if (getOldClusterVersion().onOrAfter(Version.V_7_2_0)) { - ensureGreenLongWait(index); - assertClosedIndex(index, true); - } else { - assertClosedIndex(index, false); - } - - if (isRunningAgainstOldCluster() == false) { - openIndex(index); - ensureGreen(index); - - final int expectedNumDocs = Integer.parseInt(loadInfoDocument(index + "_doc_count")); - assertTotalHits(expectedNumDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search")))); - } - } - - /** - * Asserts that an index is closed in the cluster state. If `checkRoutingTable` is true, it also asserts - * that the index has started shards. - */ - @SuppressWarnings("unchecked") - private void assertClosedIndex(final String indexName, final boolean checkRoutingTable) throws IOException { - final Map state = entityAsMap(client().performRequest(new Request("GET", "/_cluster/state"))); - - final Map metadata = (Map) XContentMapValues.extractValue("metadata.indices." + indexName, state); - assertThat(metadata, notNullValue()); - assertThat(metadata.get("state"), equalTo("close")); - - final Map blocks = (Map) XContentMapValues.extractValue("blocks.indices." + indexName, state); - assertThat(blocks, notNullValue()); - assertThat(blocks.containsKey(String.valueOf(MetadataIndexStateService.INDEX_CLOSED_BLOCK_ID)), is(true)); - - final Map settings = (Map) XContentMapValues.extractValue("settings", metadata); - assertThat(settings, notNullValue()); - - final Map routingTable = (Map) XContentMapValues.extractValue( - "routing_table.indices." 
+ indexName, - state - ); - if (checkRoutingTable) { - assertThat(routingTable, notNullValue()); - assertThat(Booleans.parseBoolean((String) XContentMapValues.extractValue("index.verified_before_close", settings)), is(true)); - final String numberOfShards = (String) XContentMapValues.extractValue("index.number_of_shards", settings); - assertThat(numberOfShards, notNullValue()); - final int nbShards = Integer.parseInt(numberOfShards); - assertThat(nbShards, greaterThanOrEqualTo(1)); - - for (int i = 0; i < nbShards; i++) { - final Collection> shards = (Collection>) XContentMapValues.extractValue( - "shards." + i, - routingTable - ); - assertThat(shards, notNullValue()); - assertThat(shards.size(), equalTo(2)); - for (Map shard : shards) { - assertThat(XContentMapValues.extractValue("shard", shard), equalTo(i)); - assertThat(XContentMapValues.extractValue("state", shard), equalTo("STARTED")); - assertThat(XContentMapValues.extractValue("index", shard), equalTo(indexName)); - } - } - } else { - assertThat(routingTable, nullValue()); - assertThat(XContentMapValues.extractValue("index.verified_before_close", settings), nullValue()); - } - } - - @SuppressWarnings("unchecked") - private void checkSnapshot(final String snapshotName, final int count, final Version tookOnVersion) throws IOException { - // Check the snapshot metadata, especially the version - Request listSnapshotRequest = new Request("GET", "/_snapshot/repo/" + snapshotName); - Map snapResponse = entityAsMap(client().performRequest(listSnapshotRequest)); - - assertEquals(singletonList(snapshotName), XContentMapValues.extractValue("snapshots.snapshot", snapResponse)); - assertEquals(singletonList("SUCCESS"), XContentMapValues.extractValue("snapshots.state", snapResponse)); - assertEquals(singletonList(tookOnVersion.toString()), XContentMapValues.extractValue("snapshots.version", snapResponse)); - - // Remove the routing setting and template so we can test restoring them. 
- Request clearRoutingFromSettings = new Request("PUT", "/_cluster/settings"); - clearRoutingFromSettings.setJsonEntity(""" - {"persistent":{"cluster.routing.allocation.exclude.test_attr": null}}"""); - client().performRequest(clearRoutingFromSettings); - client().performRequest(new Request("DELETE", "/_template/test_template")); - - // Restore - XContentBuilder restoreCommand = JsonXContent.contentBuilder().startObject(); - restoreCommand.field("include_global_state", true); - restoreCommand.field("indices", index); - restoreCommand.field("rename_pattern", index); - restoreCommand.field("rename_replacement", "restored_" + index); - restoreCommand.endObject(); - Request restoreRequest = new Request("POST", "/_snapshot/repo/" + snapshotName + "/_restore"); - restoreRequest.addParameter("wait_for_completion", "true"); - restoreRequest.setJsonEntity(Strings.toString(restoreCommand)); - client().performRequest(restoreRequest); - - // Make sure search finds all documents - Request countRequest = new Request("GET", "/restored_" + index + "/_search"); - countRequest.addParameter("size", "0"); - Map countResponse = entityAsMap(client().performRequest(countRequest)); - assertTotalHits(count, countResponse); - - // Add some extra documents to the index to be sure we can still write to it after restoring it - int extras = between(1, 100); - StringBuilder bulk = new StringBuilder(); - for (int i = 0; i < extras; i++) { - bulk.append(formatted(""" - {"index":{"_id":"%s"}} - {"test":"test"} - """, count + i)); - } - - Request writeToRestoredRequest = new Request("POST", "/restored_" + index + "/_bulk"); - - writeToRestoredRequest.addParameter("refresh", "true"); - writeToRestoredRequest.setJsonEntity(bulk.toString()); - assertThat(EntityUtils.toString(client().performRequest(writeToRestoredRequest).getEntity()), containsString("\"errors\":false")); - - // And count to make sure the add worked - // Make sure search finds all documents - Request countAfterWriteRequest = new Request("GET", "/restored_" + index + "/_search"); - countAfterWriteRequest.addParameter("size", "0"); - Map countAfterResponse = entityAsMap(client().performRequest(countRequest)); - assertTotalHits(count + extras, countAfterResponse); - - // Clean up the index for the next iteration - client().performRequest(new Request("DELETE", "/restored_*")); - - // Check settings added by the restore process - Request clusterSettingsRequest = new Request("GET", "/_cluster/settings"); - clusterSettingsRequest.addParameter("flat_settings", "true"); - Map clusterSettingsResponse = entityAsMap(client().performRequest(clusterSettingsRequest)); - @SuppressWarnings("unchecked") - final Map persistentSettings = (Map) clusterSettingsResponse.get("persistent"); - assertThat(persistentSettings.get("cluster.routing.allocation.exclude.test_attr"), equalTo(getOldClusterVersion().toString())); - - // Check that the template was restored successfully - Request getTemplateRequest = new Request("GET", "/_template/test_template"); - - Map getTemplateResponse = entityAsMap(client().performRequest(getTemplateRequest)); - Map expectedTemplate = new HashMap<>(); - expectedTemplate.put("index_patterns", singletonList("evil_*")); - - expectedTemplate.put("settings", singletonMap("index", singletonMap("number_of_shards", "1"))); - expectedTemplate.put("mappings", singletonMap("_source", singletonMap("enabled", true))); - - expectedTemplate.put("order", 0); - Map aliases = new HashMap<>(); - aliases.put("alias1", emptyMap()); - aliases.put("alias2", singletonMap("filter", 
singletonMap("term", singletonMap("version", tookOnVersion.toString())))); - expectedTemplate.put("aliases", aliases); - expectedTemplate = singletonMap("test_template", expectedTemplate); - if (false == expectedTemplate.equals(getTemplateResponse)) { - NotEqualMessageBuilder builder = new NotEqualMessageBuilder(); - builder.compareMaps(getTemplateResponse, expectedTemplate); - logger.info("expected: {}\nactual:{}", expectedTemplate, getTemplateResponse); - fail("template doesn't match:\n" + builder.toString()); - } - } - - private void indexRandomDocuments( - final int count, - final boolean flushAllowed, - final boolean saveInfo, - final boolean specifyId, - final CheckedFunction docSupplier - ) throws IOException { - logger.info("Indexing {} random documents", count); - for (int i = 0; i < count; i++) { - logger.debug("Indexing document [{}]", i); - Request createDocument = new Request("POST", "/" + index + "/_doc/" + (specifyId ? i : "")); - createDocument.setJsonEntity(Strings.toString(docSupplier.apply(i))); - client().performRequest(createDocument); - if (rarely()) { - refreshAllIndices(); - } - if (flushAllowed && rarely()) { - logger.debug("Flushing [{}]", index); - client().performRequest(new Request("POST", "/" + index + "/_flush")); - } - } - if (saveInfo) { - saveInfoDocument(index + "_count", Integer.toString(count)); - } - } - - private void indexDocument(String id) throws IOException { - final Request indexRequest = new Request("POST", "/" + index + "/" + "_doc/" + id); - indexRequest.setJsonEntity(Strings.toString(JsonXContent.contentBuilder().startObject().field("f", "v").endObject())); - assertOK(client().performRequest(indexRequest)); - } - - private int countOfIndexedRandomDocuments() throws IOException { - return Integer.parseInt(loadInfoDocument(index + "_count")); - } - - private void saveInfoDocument(String id, String value) throws IOException { - XContentBuilder infoDoc = JsonXContent.contentBuilder().startObject(); - infoDoc.field("value", value); - infoDoc.endObject(); - // Only create the first version so we know how many documents are created when the index is first created - Request request = new Request("PUT", "/info/_doc/" + id); - request.addParameter("op_type", "create"); - request.setJsonEntity(Strings.toString(infoDoc)); - client().performRequest(request); - } - - private String loadInfoDocument(String id) throws IOException { - Request request = new Request("GET", "/info/_doc/" + id); - request.addParameter("filter_path", "_source"); - String doc = toStr(client().performRequest(request)); - Matcher m = Pattern.compile("\"value\":\"(.+)\"").matcher(doc); - assertTrue(doc, m.find()); - return m.group(1); - } - - private List dataNodes(String indexName, RestClient client) throws IOException { - Request request = new Request("GET", indexName + "/_stats"); - request.addParameter("level", "shards"); - Response response = client.performRequest(request); - List nodes = new ArrayList<>(); - List shardStats = ObjectPath.createFromResponse(response).evaluate("indices." + indexName + ".shards.0"); - for (Object shard : shardStats) { - final String nodeId = ObjectPath.evaluate(shard, "routing.node"); - nodes.add(nodeId); - } - return nodes; - } - - /** - * Wait for an index to have green health, waiting longer than - * {@link ESRestTestCase#ensureGreen}. 
- */ - protected void ensureGreenLongWait(String indexName) throws IOException { - Request request = new Request("GET", "/_cluster/health/" + indexName); - request.addParameter("timeout", "2m"); - request.addParameter("wait_for_status", "green"); - request.addParameter("wait_for_no_relocating_shards", "true"); - request.addParameter("wait_for_events", "languid"); - request.addParameter("level", "shards"); - Map healthRsp = entityAsMap(client().performRequest(request)); - logger.info("health api response: {}", healthRsp); - assertEquals("green", healthRsp.get("status")); - assertFalse((Boolean) healthRsp.get("timed_out")); - } - - public void testPeerRecoveryRetentionLeases() throws Exception { - if (isRunningAgainstOldCluster()) { - XContentBuilder settings = jsonBuilder(); - settings.startObject(); - { - settings.startObject("settings"); - settings.field("number_of_shards", between(1, 5)); - settings.field("number_of_replicas", between(0, 1)); - settings.endObject(); - } - settings.endObject(); - - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(settings)); - client().performRequest(createIndex); - } - ensureGreen(index); - ensurePeerRecoveryRetentionLeasesRenewedAndSynced(index); - } - - /** - * Tests that with or without soft-deletes, we should perform an operation-based recovery if there were some - * but not too many uncommitted documents (i.e., less than 10% of committed documents or the extra translog) - * before we restart the cluster. This is important when we move from translog based to retention leases based - * peer recoveries. - */ - public void testOperationBasedRecovery() throws Exception { - if (isRunningAgainstOldCluster()) { - Settings.Builder settings = Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { - settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); - } - final String mappings = randomBoolean() ? "\"_source\": { \"enabled\": false}" : null; - createIndex(index, settings.build(), mappings); - ensureGreen(index); - int committedDocs = randomIntBetween(100, 200); - for (int i = 0; i < committedDocs; i++) { - indexDocument(Integer.toString(i)); - if (rarely()) { - flush(index, randomBoolean()); - } - } - flush(index, true); - ensurePeerRecoveryRetentionLeasesRenewedAndSynced(index); - // less than 10% of the committed docs (see IndexSettings#FILE_BASED_RECOVERY_THRESHOLD_SETTING). - int uncommittedDocs = randomIntBetween(0, (int) (committedDocs * 0.1)); - for (int i = 0; i < uncommittedDocs; i++) { - final String id = Integer.toString(randomIntBetween(1, 100)); - indexDocument(id); - } - } else { - ensureGreen(index); - assertNoFileBasedRecovery(index, n -> true); - ensurePeerRecoveryRetentionLeasesRenewedAndSynced(index); - } - } - - /** - * Verifies that once all shard copies are on the new version, translog retention is turned off for indices with soft-deletes.
- */ - public void testTurnOffTranslogRetentionAfterUpgraded() throws Exception { - if (isRunningAgainstOldCluster()) { - createIndex( - index, - Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) - .build() - ); - ensureGreen(index); - int numDocs = randomIntBetween(10, 100); - for (int i = 0; i < numDocs; i++) { - indexDocument(Integer.toString(randomIntBetween(1, 100))); - if (rarely()) { - flush(index, randomBoolean()); - } - } - } else { - ensureGreen(index); - flush(index, true); - assertEmptyTranslog(index); - ensurePeerRecoveryRetentionLeasesRenewedAndSynced(index); - } - } - - public void testResize() throws Exception { - int numDocs; - if (isRunningAgainstOldCluster()) { - final Settings.Builder settings = Settings.builder() - .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 3) - .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1); - if (minimumNodeVersion().before(Version.V_8_0_0) && randomBoolean()) { - settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false); - } - final String mappings = randomBoolean() ? "\"_source\": { \"enabled\": false}" : null; - createIndex(index, settings.build(), mappings); - numDocs = randomIntBetween(10, 1000); - for (int i = 0; i < numDocs; i++) { - indexDocument(Integer.toString(i)); - if (rarely()) { - flush(index, randomBoolean()); - } - } - saveInfoDocument("num_doc_" + index, Integer.toString(numDocs)); - ensureGreen(index); - } else { - ensureGreen(index); - numDocs = Integer.parseInt(loadInfoDocument("num_doc_" + index)); - int moreDocs = randomIntBetween(0, 100); - for (int i = 0; i < moreDocs; i++) { - indexDocument(Integer.toString(numDocs + i)); - if (rarely()) { - flush(index, randomBoolean()); - } - } - Request updateSettingsRequest = new Request("PUT", "/" + index + "/_settings"); - updateSettingsRequest.setJsonEntity("{\"settings\": {\"index.blocks.write\": true}}"); - client().performRequest(updateSettingsRequest); - { - final String target = index + "_shrunken"; - Request shrinkRequest = new Request("PUT", "/" + index + "/_shrink/" + target); - Settings.Builder settings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1); - if (randomBoolean()) { - settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); - } - shrinkRequest.setJsonEntity("{\"settings\":" + Strings.toString(settings.build()) + "}"); - client().performRequest(shrinkRequest); - ensureGreenLongWait(target); - assertNumHits(target, numDocs + moreDocs, 1); - } - { - final String target = index + "_split"; - Settings.Builder settings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 6); - if (randomBoolean()) { - settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); - } - Request splitRequest = new Request("PUT", "/" + index + "/_split/" + target); - splitRequest.setJsonEntity("{\"settings\":" + Strings.toString(settings.build()) + "}"); - client().performRequest(splitRequest); - ensureGreenLongWait(target); - assertNumHits(target, numDocs + moreDocs, 6); - } - { - final String target = index + "_cloned"; - client().performRequest(new Request("PUT", "/" + index + "/_clone/" + target)); - ensureGreenLongWait(target); - assertNumHits(target, numDocs + moreDocs, 3); - } - } - } - - @SuppressWarnings("unchecked") - public void testSystemIndexMetadataIsUpgraded() throws Exception { - assumeTrue(".tasks became a system 
index in 7.10.0", getOldClusterVersion().onOrAfter(Version.V_7_10_0)); - final String systemIndexWarning = "this request accesses system indices: [.tasks], but in a future major version, direct " - + "access to system indices will be prevented by default"; - if (isRunningAgainstOldCluster()) { - // create index - Request createTestIndex = new Request("PUT", "/test_index_old"); - createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}"); - client().performRequest(createTestIndex); - - Request bulk = new Request("POST", "/_bulk"); - bulk.addParameter("refresh", "true"); - bulk.setJsonEntity(""" - {"index": {"_index": "test_index_old"}} - {"f1": "v1", "f2": "v2"} - """); - client().performRequest(bulk); - - // start a async reindex job - Request reindex = new Request("POST", "/_reindex"); - reindex.setJsonEntity(""" - { - "source":{ - "index":"test_index_old" - }, - "dest":{ - "index":"test_index_reindex" - } - }"""); - reindex.addParameter("wait_for_completion", "false"); - Map response = entityAsMap(client().performRequest(reindex)); - String taskId = (String) response.get("task"); - - // wait for task - Request getTask = new Request("GET", "/_tasks/" + taskId); - getTask.addParameter("wait_for_completion", "true"); - client().performRequest(getTask); - - // make sure .tasks index exists - Request getTasksIndex = new Request("GET", "/.tasks"); - getTasksIndex.setOptions(expectVersionSpecificWarnings(v -> { - v.current(systemIndexWarning); - v.compatible(systemIndexWarning); - })); - getTasksIndex.addParameter("allow_no_indices", "false"); - - getTasksIndex.setOptions(expectVersionSpecificWarnings(v -> { - v.current(systemIndexWarning); - v.compatible(systemIndexWarning); - })); - assertBusy(() -> { - try { - assertThat(client().performRequest(getTasksIndex).getStatusLine().getStatusCode(), is(200)); - } catch (ResponseException e) { - throw new AssertionError(".tasks index does not exist yet"); - } - }); - - // If we are on 7.x create an alias that includes both a system index and a non-system index so we can be sure it gets - // upgraded properly. If we're already on 8.x, skip this part of the test. 
- if (minimumNodeVersion().before(SYSTEM_INDEX_ENFORCEMENT_VERSION)) { - // Create an alias to make sure it gets upgraded properly - Request putAliasRequest = new Request("POST", "/_aliases"); - putAliasRequest.setJsonEntity(""" - { - "actions": [ - {"add": {"index": ".tasks", "alias": "test-system-alias"}}, - {"add": {"index": "test_index_reindex", "alias": "test-system-alias"}} - ] - }"""); - putAliasRequest.setOptions(expectVersionSpecificWarnings(v -> { - v.current(systemIndexWarning); - v.compatible(systemIndexWarning); - })); - assertThat(client().performRequest(putAliasRequest).getStatusLine().getStatusCode(), is(200)); - } - } else { - assertBusy(() -> { - Request clusterStateRequest = new Request("GET", "/_cluster/state/metadata"); - Map indices = new XContentTestUtils.JsonMapView(entityAsMap(client().performRequest(clusterStateRequest))) - .get("metadata.indices"); - - // Make sure our non-system index is still non-system - assertThat(new XContentTestUtils.JsonMapView(indices).get("test_index_old.system"), is(false)); - - // Can't get the .tasks index via JsonMapView because it splits on `.` - assertThat(indices, hasKey(".tasks")); - XContentTestUtils.JsonMapView tasksIndex = new XContentTestUtils.JsonMapView((Map) indices.get(".tasks")); - assertThat(tasksIndex.get("system"), is(true)); - - // If .tasks was created in a 7.x version, it should have an alias on it that we need to make sure got upgraded properly. - final String tasksCreatedVersionString = tasksIndex.get("settings.index.version.created"); - assertThat(tasksCreatedVersionString, notNullValue()); - final Version tasksCreatedVersion = Version.fromId(Integer.parseInt(tasksCreatedVersionString)); - if (tasksCreatedVersion.before(SYSTEM_INDEX_ENFORCEMENT_VERSION)) { - // Verify that the alias survived the upgrade - Request getAliasRequest = new Request("GET", "/_alias/test-system-alias"); - getAliasRequest.setOptions(expectVersionSpecificWarnings(v -> { - v.current(systemIndexWarning); - v.compatible(systemIndexWarning); - })); - Map aliasResponse = entityAsMap(client().performRequest(getAliasRequest)); - assertThat(aliasResponse, hasKey(".tasks")); - assertThat(aliasResponse, hasKey("test_index_reindex")); - } - }); - } - } - - public void testEnableSoftDeletesOnRestore() throws Exception { - assumeTrue("soft deletes must be enabled on 8.0+", getOldClusterVersion().before(Version.V_8_0_0)); - final String snapshot = "snapshot-" + index; - if (isRunningAgainstOldCluster()) { - final Settings.Builder settings = Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1); - settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false); - createIndex(index, settings.build()); - ensureGreen(index); - int numDocs = randomIntBetween(0, 100); - indexRandomDocuments( - numDocs, - true, - true, - randomBoolean(), - i -> jsonBuilder().startObject().field("field", "value").endObject() - ); - // create repo - XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); - { - repoConfig.field("type", "fs"); - repoConfig.startObject("settings"); - { - repoConfig.field("compress", randomBoolean()); - repoConfig.field("location", System.getProperty("tests.path.repo")); - } - repoConfig.endObject(); - } - repoConfig.endObject(); - Request createRepoRequest = new Request("PUT", "/_snapshot/repo"); - createRepoRequest.setJsonEntity(Strings.toString(repoConfig)); - client().performRequest(createRepoRequest); - // create snapshot - Request createSnapshot = new 
Request("PUT", "/_snapshot/repo/" + snapshot); - createSnapshot.addParameter("wait_for_completion", "true"); - createSnapshot.setJsonEntity("{\"indices\": \"" + index + "\"}"); - client().performRequest(createSnapshot); - } else { - String restoredIndex = "restored-" + index; - // Restore - XContentBuilder restoreCommand = JsonXContent.contentBuilder().startObject(); - restoreCommand.field("indices", index); - restoreCommand.field("rename_pattern", index); - restoreCommand.field("rename_replacement", restoredIndex); - restoreCommand.startObject("index_settings"); - { - restoreCommand.field("index.soft_deletes.enabled", true); - } - restoreCommand.endObject(); - restoreCommand.endObject(); - Request restoreRequest = new Request("POST", "/_snapshot/repo/" + snapshot + "/_restore"); - restoreRequest.addParameter("wait_for_completion", "true"); - restoreRequest.setJsonEntity(Strings.toString(restoreCommand)); - client().performRequest(restoreRequest); - ensureGreen(restoredIndex); - int numDocs = countOfIndexedRandomDocuments(); - assertTotalHits(numDocs, entityAsMap(client().performRequest(new Request("GET", "/" + restoredIndex + "/_search")))); - } - } - - public void testForbidDisableSoftDeletesOnRestore() throws Exception { - final String snapshot = "snapshot-" + index; - if (isRunningAgainstOldCluster()) { - final Settings.Builder settings = Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); - createIndex(index, settings.build()); - ensureGreen(index); - int numDocs = randomIntBetween(0, 100); - indexRandomDocuments( - numDocs, - true, - true, - randomBoolean(), - i -> jsonBuilder().startObject().field("field", "value").endObject() - ); - // create repo - XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); - { - repoConfig.field("type", "fs"); - repoConfig.startObject("settings"); - { - repoConfig.field("compress", randomBoolean()); - repoConfig.field("location", System.getProperty("tests.path.repo")); - } - repoConfig.endObject(); - } - repoConfig.endObject(); - Request createRepoRequest = new Request("PUT", "/_snapshot/repo"); - createRepoRequest.setJsonEntity(Strings.toString(repoConfig)); - client().performRequest(createRepoRequest); - // create snapshot - Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + snapshot); - createSnapshot.addParameter("wait_for_completion", "true"); - createSnapshot.setJsonEntity("{\"indices\": \"" + index + "\"}"); - client().performRequest(createSnapshot); - } else { - // Restore - XContentBuilder restoreCommand = JsonXContent.contentBuilder().startObject(); - restoreCommand.field("indices", index); - restoreCommand.field("rename_pattern", index); - restoreCommand.field("rename_replacement", "restored-" + index); - restoreCommand.startObject("index_settings"); - { - restoreCommand.field("index.soft_deletes.enabled", false); - } - restoreCommand.endObject(); - restoreCommand.endObject(); - Request restoreRequest = new Request("POST", "/_snapshot/repo/" + snapshot + "/_restore"); - restoreRequest.addParameter("wait_for_completion", "true"); - restoreRequest.setJsonEntity(Strings.toString(restoreCommand)); - final ResponseException error = expectThrows(ResponseException.class, () -> client().performRequest(restoreRequest)); - assertThat(error.getMessage(), containsString("cannot disable setting [index.soft_deletes.enabled] on restore")); - } - } - - /** - * In 7.14 the 
cluster.remote.*.transport.compress setting was changed from a boolean to an enum setting - * with true/false as options. This test ensures that the old boolean setting in cluster state is - * translated properly. This test can be removed in 9.0. - */ - public void testTransportCompressionSetting() throws IOException { - assumeTrue("the old transport.compress setting existed before 7.14", getOldClusterVersion().before(Version.V_7_14_0)); - assumeTrue( - "Early versions of 6.x do not have cluster.remote* prefixed settings", - getOldClusterVersion().onOrAfter(Version.V_7_14_0.minimumCompatibilityVersion()) - ); - if (isRunningAgainstOldCluster()) { - final Request putSettingsRequest = new Request("PUT", "/_cluster/settings"); - try (XContentBuilder builder = jsonBuilder()) { - builder.startObject(); - { - builder.startObject("persistent"); - { - builder.field("cluster.remote.foo.seeds", Collections.singletonList("localhost:9200")); - builder.field("cluster.remote.foo.transport.compress", "true"); - } - builder.endObject(); - } - builder.endObject(); - putSettingsRequest.setJsonEntity(Strings.toString(builder)); - } - client().performRequest(putSettingsRequest); - } else { - final Request getSettingsRequest = new Request("GET", "/_cluster/settings"); - final Response getSettingsResponse = client().performRequest(getSettingsRequest); - try (XContentParser parser = createParser(JsonXContent.jsonXContent, getSettingsResponse.getEntity().getContent())) { - final Settings settings = RestClusterGetSettingsResponse.fromXContent(parser).getPersistentSettings(); - assertThat(REMOTE_CLUSTER_COMPRESS.getConcreteSettingForNamespace("foo").get(settings), equalTo(Compression.Enabled.TRUE)); - } - } - } - - public static void assertNumHits(String index, int numHits, int totalShards) throws IOException { - Map resp = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))); - assertNoFailures(resp); - assertThat(XContentMapValues.extractValue("_shards.total", resp), equalTo(totalShards)); - assertThat(XContentMapValues.extractValue("_shards.successful", resp), equalTo(totalShards)); - assertThat(extractTotalHits(resp), equalTo(numHits)); - } -} diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java deleted file mode 100644 index 34ae95d23b03..000000000000 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ /dev/null @@ -1,231 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1.
- */ - -package org.elasticsearch.upgrades; - -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.InputStreamStreamInput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.Fuzziness; -import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.ConstantScoreQueryBuilder; -import org.elasticsearch.index.query.DisMaxQueryBuilder; -import org.elasticsearch.index.query.MatchAllQueryBuilder; -import org.elasticsearch.index.query.MatchNoneQueryBuilder; -import org.elasticsearch.index.query.MatchPhraseQueryBuilder; -import org.elasticsearch.index.query.MatchQueryBuilder; -import org.elasticsearch.index.query.Operator; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.RangeQueryBuilder; -import org.elasticsearch.index.query.SpanNearQueryBuilder; -import org.elasticsearch.index.query.SpanTermQueryBuilder; -import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; -import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder; -import org.elasticsearch.search.SearchModule; -import org.elasticsearch.xcontent.XContentBuilder; - -import java.io.ByteArrayInputStream; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.Base64; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; - -/** - * An integration test that tests whether percolator queries stored in an older supported ES version can still be read by the - * current ES version. Percolator queries are stored in the binary format in a dedicated doc values field (see - * PercolatorFieldMapper#createQueryBuilderField(...) method), using the query builders' writable contract. This test - * does a best-effort verification that we don't break bwc for query builders between the first previous major version and - * the latest current major release. - * - * The queries to test are specified in json format, which turns out to work because we tend to break here rarely. If the - * json format of a query being tested here changes, feel free to update this.
- */ -public class QueryBuilderBWCIT extends AbstractFullClusterRestartTestCase { - - private static final List CANDIDATES = new ArrayList<>(); - - static { - addCandidate(""" - "match": { "text_field": "value"} - """, new MatchQueryBuilder("text_field", "value")); - addCandidate(""" - "match": { "text_field": {"query": "value", "operator": "and"} } - """, new MatchQueryBuilder("text_field", "value").operator(Operator.AND)); - addCandidate(""" - "match": { "text_field": {"query": "value", "analyzer": "english"} } - """, new MatchQueryBuilder("text_field", "value").analyzer("english")); - addCandidate(""" - "match": { "text_field": {"query": "value", "minimum_should_match": 3} } - """, new MatchQueryBuilder("text_field", "value").minimumShouldMatch("3")); - addCandidate(""" - "match": { "text_field": {"query": "value", "fuzziness": "auto"} } - """, new MatchQueryBuilder("text_field", "value").fuzziness(Fuzziness.AUTO)); - addCandidate(""" - "match_phrase": { "text_field": "value"} - """, new MatchPhraseQueryBuilder("text_field", "value")); - addCandidate(""" - "match_phrase": { "text_field": {"query": "value", "slop": 3}} - """, new MatchPhraseQueryBuilder("text_field", "value").slop(3)); - addCandidate(""" - "range": { "long_field": {"gte": 1, "lte": 9}} - """, new RangeQueryBuilder("long_field").from(1).to(9)); - addCandidate( - """ - "bool": { "must_not": [{"match_none": {}}], "must": [{"match_all": {}}], "filter": [{"match_all": {}}], \ - "should": [{"match_all": {}}]} - """, - new BoolQueryBuilder().mustNot(new MatchNoneQueryBuilder()) - .must(new MatchAllQueryBuilder()) - .filter(new MatchAllQueryBuilder()) - .should(new MatchAllQueryBuilder()) - ); - addCandidate( - """ - "dis_max": {"queries": [{"match_all": {}},{"match_all": {}},{"match_all": {}}], "tie_breaker": 0.01} - """, - new DisMaxQueryBuilder().add(new MatchAllQueryBuilder()) - .add(new MatchAllQueryBuilder()) - .add(new MatchAllQueryBuilder()) - .tieBreaker(0.01f) - ); - addCandidate(""" - "constant_score": {"filter": {"match_all": {}}, "boost": 0.1} - """, new ConstantScoreQueryBuilder(new MatchAllQueryBuilder()).boost(0.1f)); - addCandidate( - """ - "function_score": {"query": {"match_all": {}},"functions": [{"random_score": {}, "filter": {"match_all": {}}, \ - "weight": 0.2}]} - """, - new FunctionScoreQueryBuilder( - new MatchAllQueryBuilder(), - new FunctionScoreQueryBuilder.FilterFunctionBuilder[] { - new FunctionScoreQueryBuilder.FilterFunctionBuilder( - new MatchAllQueryBuilder(), - new RandomScoreFunctionBuilder().setWeight(0.2f) - ) } - ) - ); - addCandidate( - """ - "span_near": {"clauses": [{ "span_term": { "keyword_field": "value1" }}, \ - { "span_term": { "keyword_field": "value2" }}]} - """, - new SpanNearQueryBuilder(new SpanTermQueryBuilder("keyword_field", "value1"), 0).addClause( - new SpanTermQueryBuilder("keyword_field", "value2") - ) - ); - addCandidate( - """ - "span_near": {"clauses": [{ "span_term": { "keyword_field": "value1" }}, \ - { "span_term": { "keyword_field": "value2" }}], "slop": 2} - """, - new SpanNearQueryBuilder(new SpanTermQueryBuilder("keyword_field", "value1"), 2).addClause( - new SpanTermQueryBuilder("keyword_field", "value2") - ) - ); - addCandidate( - """ - "span_near": {"clauses": [{ "span_term": { "keyword_field": "value1" }}, \ - { "span_term": { "keyword_field": "value2" }}], "slop": 2, "in_order": false} - """, - new SpanNearQueryBuilder(new SpanTermQueryBuilder("keyword_field", "value1"), 2).addClause( - new SpanTermQueryBuilder("keyword_field", "value2") - 
).inOrder(false) - ); - } - - private static void addCandidate(String querySource, QueryBuilder expectedQb) { - CANDIDATES.add(new Object[] { "{\"query\": {" + querySource + "}}", expectedQb }); - } - - public void testQueryBuilderBWC() throws Exception { - String index = "queries"; - if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { - mappingsAndSettings.startObject("settings"); - mappingsAndSettings.field("number_of_shards", 1); - mappingsAndSettings.field("number_of_replicas", 0); - mappingsAndSettings.endObject(); - } - { - mappingsAndSettings.startObject("mappings"); - mappingsAndSettings.startObject("properties"); - { - mappingsAndSettings.startObject("query"); - mappingsAndSettings.field("type", "percolator"); - mappingsAndSettings.endObject(); - } - { - mappingsAndSettings.startObject("keyword_field"); - mappingsAndSettings.field("type", "keyword"); - mappingsAndSettings.endObject(); - } - { - mappingsAndSettings.startObject("text_field"); - mappingsAndSettings.field("type", "text"); - mappingsAndSettings.endObject(); - } - { - mappingsAndSettings.startObject("long_field"); - mappingsAndSettings.field("type", "long"); - mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - Request request = new Request("PUT", "/" + index); - request.setJsonEntity(Strings.toString(mappingsAndSettings)); - Response rsp = client().performRequest(request); - assertEquals(200, rsp.getStatusLine().getStatusCode()); - - for (int i = 0; i < CANDIDATES.size(); i++) { - request = new Request("PUT", "/" + index + "/_doc/" + Integer.toString(i)); - request.setJsonEntity((String) CANDIDATES.get(i)[0]); - rsp = client().performRequest(request); - assertEquals(201, rsp.getStatusLine().getStatusCode()); - } - } else { - NamedWriteableRegistry registry = new NamedWriteableRegistry( - new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedWriteables() - ); - - for (int i = 0; i < CANDIDATES.size(); i++) { - QueryBuilder expectedQueryBuilder = (QueryBuilder) CANDIDATES.get(i)[1]; - Request request = new Request("GET", "/" + index + "/_search"); - request.setJsonEntity(formatted(""" - {"query": {"ids": {"values": ["%s"]}}, "docvalue_fields": [{"field":"query.query_builder_field"}]} - """, i)); - Response rsp = client().performRequest(request); - assertEquals(200, rsp.getStatusLine().getStatusCode()); - var hitRsp = (Map) ((List) ((Map) responseAsMap(rsp).get("hits")).get("hits")).get(0); - String queryBuilderStr = (String) ((List) ((Map) hitRsp.get("fields")).get("query.query_builder_field")).get(0); - byte[] qbSource = Base64.getDecoder().decode(queryBuilderStr); - try (InputStream in = new ByteArrayInputStream(qbSource, 0, qbSource.length)) { - try (StreamInput input = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(in), registry)) { - input.setVersion(getOldClusterVersion()); - QueryBuilder queryBuilder = input.readNamedWriteable(QueryBuilder.class); - assert in.read() == -1; - assertEquals(expectedQueryBuilder, queryBuilder); - } - } - } - } - } -} diff --git a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/ESJsonLayoutTests.java b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/ESJsonLayoutTests.java index d7af475e15dc..e4fe90118bd1 100644 --- a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/ESJsonLayoutTests.java +++ 
b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/ESJsonLayoutTests.java @@ -7,12 +7,11 @@ */ package org.elasticsearch.common.logging; +import org.elasticsearch.core.Strings; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; import org.junit.BeforeClass; -import java.util.Locale; - public class ESJsonLayoutTests extends ESTestCase { @BeforeClass public static void initNodeName() { @@ -27,7 +26,7 @@ public void testLayout() { ESJsonLayout server = ESJsonLayout.newBuilder().setType("server").build(); String conversionPattern = server.getPatternLayout().getConversionPattern(); - assertThat(conversionPattern, Matchers.equalTo(String.format(Locale.ROOT, """ + assertThat(conversionPattern, Matchers.equalTo(Strings.format(""" {\ "type": "server", \ "timestamp": "%%d{yyyy-MM-dd'T'HH:mm:ss,SSSZZ}", \ @@ -45,7 +44,7 @@ public void testLayoutWithAdditionalFieldOverride() { String conversionPattern = server.getPatternLayout().getConversionPattern(); // message field is removed as is expected to be provided by a field from a message - assertThat(conversionPattern, Matchers.equalTo(String.format(Locale.ROOT, """ + assertThat(conversionPattern, Matchers.equalTo(Strings.format(""" {\ "type": "server", \ "timestamp": "%%d{yyyy-MM-dd'T'HH:mm:ss,SSSZZ}", \ diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/RareTermsIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/RareTermsIT.java index 4ca8c8770adb..a33fc01d8446 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/RareTermsIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/RareTermsIT.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.core.Strings; import org.elasticsearch.test.rest.ESRestTestCase; import org.hamcrest.Matchers; @@ -30,10 +31,11 @@ private int indexDocs(int numDocs, int id) throws Exception { final Request request = new Request("POST", "/_bulk"); final StringBuilder builder = new StringBuilder(); for (int i = 0; i < numDocs; ++i) { - builder.append(formatted(""" + Object[] args = new Object[] { index, id++, i }; + builder.append(Strings.format(""" { "index" : { "_index" : "%s", "_id": "%s" } } {"str_value" : "s%s"} - """, index, id++, i)); + """, args)); } request.setJsonEntity(builder.toString()); assertOK(client().performRequest(request)); @@ -62,7 +64,7 @@ public void testSingleValuedString() throws Exception { private void assertNumRareTerms(int maxDocs, int rareTerms) throws IOException { final Request request = new Request("POST", index + "/_search"); - request.setJsonEntity(formatted(""" + request.setJsonEntity(Strings.format(""" { "aggs": { "rareTerms": { diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/SearchWithMinCompatibleSearchNodeIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/SearchWithMinCompatibleSearchNodeIT.java index 9adec5df72ea..a0f0f1319b40 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/SearchWithMinCompatibleSearchNodeIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/SearchWithMinCompatibleSearchNodeIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedRunnable; +import org.elasticsearch.core.Strings; import 
org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; @@ -84,7 +85,7 @@ public void testMinVersionAsNewVersion() throws Exception { ); assertThat(responseException.getMessage(), containsString(""" {"error":{"root_cause":[],"type":"search_phase_execution_exception\"""")); - assertThat(responseException.getMessage(), containsString(formatted(""" + assertThat(responseException.getMessage(), containsString(Strings.format(""" caused_by":{"type":"version_mismatch_exception",\ "reason":"One of the shards is incompatible with the required minimum version [%s]\"""", newVersion))); }); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java index 5703b1c5e2e6..a1b71e217594 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/DockerTests.java @@ -1221,4 +1221,21 @@ public void test500Readiness() throws Exception { waitForElasticsearch(installation); assertTrue(readinessProbe(9399)); } + + public void test600Interrupt() { + waitForElasticsearch(installation, "elastic", PASSWORD); + final Result containerLogs = getContainerLogs(); + + assertThat("Container logs should contain starting ...", containerLogs.stdout(), containsString("starting ...")); + + final List infos = ProcessInfo.getProcessInfo(sh, "java"); + final int maxPid = infos.stream().map(i -> i.pid()).max(Integer::compareTo).get(); + + sh.run("bash -c 'kill -int " + maxPid + "'"); // send ctrl+c to all java processes + final Result containerLogsAfter = getContainerLogs(); + + assertThat("Container logs should contain stopping ...", containerLogsAfter.stdout(), containsString("stopping ...")); + assertThat("No errors stdout", containerLogsAfter.stdout(), not(containsString("java.security.AccessControlException:"))); + assertThat("No errors stderr", containerLogsAfter.stderr(), not(containsString("java.security.AccessControlException:"))); + } } diff --git a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java index 0a83c293dca0..59741fd105b6 100644 --- a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java +++ b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java @@ -280,7 +280,7 @@ private static void createSnapshot(String repoName, String name, String index) t private void createIndex(String name, int shards) throws IOException { final Request putIndexRequest = new Request("PUT", "/" + name); - putIndexRequest.setJsonEntity(formatted(""" + putIndexRequest.setJsonEntity(Strings.format(""" { "settings" : { "index" : { diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 2de2786cfa92..d3078dd8c938 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -8,6 +8,7 @@ import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.internal.BwcVersions import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask @@ -38,7 +39,9 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> setting 'repositories.url.allowed_urls', 'http://snapshot.test*' setting 'path.repo', 
"${buildDir}/cluster/shared/repo/${baseName}" setting 'xpack.security.enabled', 'false' + setting 'logger.org.elasticsearch.cluster.service.MasterService', 'TRACE' setting 'logger.org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator', 'TRACE' + setting 'logger.org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders', 'TRACE' requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") } @@ -51,10 +54,19 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> doFirst { delete("${buildDir}/cluster/shared/repo/${baseName}") } + def excludeList = [] systemProperty 'tests.rest.suite', 'old_cluster' systemProperty 'tests.upgrade_from_version', oldVersion nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) nonInputProperties.systemProperty('tests.clustername', baseName) + if (bwcVersion.before("8.4.0")) { + excludeList.addAll(["old_cluster/30_vector_search/*"]) + } else if (bwcVersion.before("8.6.0")) { + excludeList.addAll(["old_cluster/30_vector_search/Create indexed byte vectors and search"]) + } + if (excludeList.isEmpty() == false) { + systemProperty 'tests.rest.blacklist', excludeList.join(',') + } } tasks.register("${baseName}#oneThirdUpgradedTest", StandaloneRestIntegTestTask) { @@ -68,6 +80,15 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> systemProperty 'tests.first_round', 'true' nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) nonInputProperties.systemProperty('tests.clustername', baseName) + def excludeList = [] + if (bwcVersion.before("8.4.0")) { + excludeList.addAll(["mixed_cluster/30_vector_search/*"]) + } else if (bwcVersion.before("8.6.0")) { + excludeList.addAll(["mixed_cluster/30_vector_search/Search byte indices created in old cluster"]) + } + if (excludeList.isEmpty() == false) { + systemProperty 'tests.rest.blacklist', excludeList.join(',') + } } tasks.register("${baseName}#twoThirdsUpgradedTest", StandaloneRestIntegTestTask) { @@ -81,6 +102,15 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> systemProperty 'tests.first_round', 'false' nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) nonInputProperties.systemProperty('tests.clustername', baseName) + def excludeList = [] + if (bwcVersion.before("8.4.0")) { + excludeList.addAll(["mixed_cluster/30_vector_search/*"]) + } else if (bwcVersion.before("8.6.0")) { + excludeList.addAll(["mixed_cluster/30_vector_search/Search byte indices created in old cluster"]) + } + if (excludeList.isEmpty() == false) { + systemProperty 'tests.rest.blacklist', excludeList.join(',') + } } tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) { @@ -93,6 +123,15 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> systemProperty 'tests.upgrade_from_version', oldVersion nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) nonInputProperties.systemProperty('tests.clustername', baseName) + def excludeList = [] + if (bwcVersion.before("8.4.0")) { + excludeList.addAll(["upgraded_cluster/30_vector_search/*"]) + } else if (bwcVersion.before("8.6.0")) { + excludeList.addAll(["upgraded_cluster/30_vector_search/Search byte indices created in old cluster"]) + } + if (excludeList.isEmpty() == false) { + systemProperty 'tests.rest.blacklist', 
excludeList.join(',') + } } tasks.register(bwcTaskName(bwcVersion)) { diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java index e0c42429f71c..2cbfe030be09 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java @@ -43,9 +43,9 @@ public void testUpgradeDesiredNodes() throws Exception { return; } - if (UPGRADE_FROM_VERSION.onOrAfter(Processors.DOUBLE_PROCESSORS_SUPPORT_VERSION)) { + if (UPGRADE_FROM_VERSION.transportVersion.onOrAfter(Processors.DOUBLE_PROCESSORS_SUPPORT_VERSION)) { assertUpgradedNodesCanReadDesiredNodes(); - } else if (UPGRADE_FROM_VERSION.onOrAfter(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { + } else if (UPGRADE_FROM_VERSION.transportVersion.onOrAfter(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { assertDesiredNodesUpdatedWithRoundedUpFloatsAreIdempotent(); } else { assertDesiredNodesWithFloatProcessorsAreRejectedInOlderVersions(); diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java index ee94757e3ab2..b860e53d447b 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -135,7 +135,7 @@ public void testIndexing() throws IOException { public void testAutoIdWithOpTypeCreate() throws IOException { final String indexName = "auto_id_and_op_type_create_index"; - String b = formatted(""" + String b = Strings.format(""" {"create": {"_index": "%s"}} {"f1": "v"} """, indexName); @@ -325,7 +325,7 @@ private void tsdbBulk(StringBuilder bulk, String dim, long timeStart, long timeE long delta = TimeUnit.SECONDS.toMillis(20); double value = (timeStart - TSDB_TIMES[0]) / TimeUnit.SECONDS.toMillis(20) * rate; for (long t = timeStart; t < timeEnd; t += delta) { - bulk.append(formatted(""" + bulk.append(Strings.format(""" {"index": {"_index": "tsdb"}} {"@timestamp": %s, "dim": "%s", "value": %s} """, t, dim, value)); diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index 330b0e485d6b..5e0137e14308 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -40,6 +40,7 @@ import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; +import static org.elasticsearch.upgrades.UpgradeWithOldIndexSettingsIT.updateIndexSettingsPermittingSlowlogDeprecationWarning; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.in; @@ -236,7 +237,7 @@ public void testRelocationWithConcurrentIndexing() throws Exception { final String newNode = getNodeId(v -> v.equals(Version.CURRENT)); final String oldNode = getNodeId(v -> v.before(Version.CURRENT)); // remove the replica and guaranteed the primary is 
placed on the old node - updateIndexSettings( + updateIndexSettingsPermittingSlowlogDeprecationWarning( index, Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) @@ -246,7 +247,10 @@ public void testRelocationWithConcurrentIndexing() throws Exception { ); ensureGreen(index); // wait for the primary to be assigned ensureNoInitializingShards(); // wait for all other shard activity to finish - updateIndexSettings(index, Settings.builder().put("index.routing.allocation.include._id", newNode)); + updateIndexSettingsPermittingSlowlogDeprecationWarning( + index, + Settings.builder().put("index.routing.allocation.include._id", newNode) + ); asyncIndexDocs(index, 10, 50).get(); // ensure the relocation from old node to new node has occurred; otherwise ensureGreen can // return true even though shards haven't moved to the new node yet (allocation was throttled). diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java index ba61b703f12a..07afe0cc5693 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java @@ -31,6 +31,7 @@ import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; +import static org.elasticsearch.upgrades.UpgradeWithOldIndexSettingsIT.updateIndexSettingsPermittingSlowlogDeprecationWarning; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; @@ -40,6 +41,8 @@ import static org.hamcrest.Matchers.notNullValue; public class SnapshotBasedRecoveryIT extends AbstractRollingTestCase { + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/93271") public void testSnapshotBasedRecovery() throws Exception { final String indexName = "snapshot_based_recovery"; final String repositoryName = "snapshot_based_recovery_repo"; @@ -76,8 +79,12 @@ public void testSnapshotBasedRecovery() throws Exception { if (upgradedNodeIds.isEmpty() == false) { assertThat(upgradedNodeIds.size(), is(equalTo(1))); String upgradedNodeId = upgradedNodeIds.get(0); + logger.info("--> excluding [{}] from node [{}]", indexName, upgradedNodeId); updateIndexSettings(indexName, Settings.builder().put("index.routing.allocation.exclude._id", upgradedNodeId)); ensureGreen(indexName); + logger.info("--> finished excluding [{}] from node [{}]", indexName, upgradedNodeId); + } else { + logger.info("--> no upgrading nodes, not adding any exclusions for [{}]", indexName); } String primaryNodeId = getPrimaryNodeIdOfShard(indexName, 0); @@ -90,19 +97,37 @@ public void testSnapshotBasedRecovery() throws Exception { // the primary to a node in the old version, this allows adding replicas in the first mixed round. 
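// If the primary has ended up on an already-upgraded node, the branch below cancels it so it re-initializes on a node still running UPGRADE_FROM_VERSION, which keeps replica allocation possible during the first mixed round.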
logger.info("--> Primary node in first mixed round {} / {}", primaryNodeId, primaryNodeVersion); if (primaryNodeVersion.after(UPGRADE_FROM_VERSION)) { + logger.info("--> cancelling primary shard on node [{}]", primaryNodeId); cancelShard(indexName, 0, primaryNodeId); + logger.info("--> done cancelling primary shard on node [{}]", primaryNodeId); String currentPrimaryNodeId = getPrimaryNodeIdOfShard(indexName, 0); assertThat(getNodeVersion(currentPrimaryNodeId), is(equalTo(UPGRADE_FROM_VERSION))); } } else { + logger.info("--> not in first upgrade round, removing exclusions for [{}]", indexName); updateIndexSettings(indexName, Settings.builder().putNull("index.routing.allocation.exclude._id")); + logger.info("--> done removing exclusions for [{}]", indexName); } // Drop replicas - updateIndexSettings(indexName, Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)); - updateIndexSettings(indexName, Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)); - ensureGreen(indexName); + logger.info("--> dropping replicas from [{}]", indexName); + updateIndexSettingsPermittingSlowlogDeprecationWarning( + indexName, + Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) + ); + logger.info("--> finished dropping replicas from [{}], adding them back", indexName); + updateIndexSettingsPermittingSlowlogDeprecationWarning( + indexName, + Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + ); + logger.info("--> finished adding replicas from [{}]", indexName); + try { + ensureGreen(indexName); + } catch (AssertionError e) { + logAllocationExplain(); + throw e; + } assertMatchAllReturnsAllDocuments(indexName, numDocs); assertMatchQueryReturnsAllDocuments(indexName, numDocs); } @@ -110,6 +135,20 @@ public void testSnapshotBasedRecovery() throws Exception { } } + private void logAllocationExplain() throws Exception { + // Used to debug #91383 + var request = new Request(HttpGet.METHOD_NAME, "_cluster/allocation/explain?include_disk_info=true&include_yes_decisions=true"); + request.setJsonEntity(""" + { + "index": "snapshot_based_recovery", + "shard": 0, + "primary": false + } + """); + var response = client().performRequest(request); + logger.info("--> allocation explain {}", EntityUtils.toString(response.getEntity())); + } + private List getUpgradedNodeIds() throws IOException { Request request = new Request(HttpGet.METHOD_NAME, "_nodes/_all"); Response response = client().performRequest(request); @@ -178,7 +217,7 @@ private void cancelShard(String indexName, int shard, String nodeName) throws IO } builder.endObject(); - Request request = new Request(HttpPost.METHOD_NAME, "/_cluster/reroute?pretty"); + Request request = new Request(HttpPost.METHOD_NAME, "/_cluster/reroute?pretty&metric=none"); request.setJsonEntity(Strings.toString(builder)); Response response = client().performRequest(request); logger.info("--> Relocated primary to an older version {}", EntityUtils.toString(response.getEntity())); diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java index a261720e266d..35688e7c244c 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java @@ -12,10 +12,11 @@ import 
org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.core.Strings; import java.io.IOException; -import java.util.Locale; import java.util.Map; import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; @@ -56,7 +57,7 @@ public void testOldIndexSettings() throws Exception { if (UPGRADE_FROM_VERSION.before(Version.V_8_0_0)) { bulk.setOptions(expectWarnings(EXPECTED_WARNING)); } - bulk.setJsonEntity(String.format(Locale.ROOT, """ + bulk.setJsonEntity(Strings.format(""" {"index": {"_index": "%s"}} {"f1": "v1", "f2": "v2"} """, INDEX_NAME)); @@ -69,7 +70,7 @@ public void testOldIndexSettings() throws Exception { if (UPGRADE_FROM_VERSION.before(Version.V_8_0_0)) { bulk.setOptions(expectWarnings(EXPECTED_WARNING)); } - bulk.setJsonEntity(String.format(Locale.ROOT, """ + bulk.setJsonEntity(Strings.format(""" {"index": {"_index": "%s"}} {"f1": "v3", "f2": "v4"} """, INDEX_NAME)); @@ -111,4 +112,20 @@ private void assertCount(String index, int countAtLeast) throws IOException { assertTrue(hitsTotal >= countAtLeast); } + + public static void updateIndexSettingsPermittingSlowlogDeprecationWarning(String index, Settings.Builder settings) throws IOException { + Request request = new Request("PUT", "/" + index + "/_settings"); + request.setJsonEntity(org.elasticsearch.common.Strings.toString(settings.build())); + if (UPGRADE_FROM_VERSION.before(Version.V_7_17_9)) { + // There is a bug (fixed in 7.17.9 and 8.7.0) where deprecation warnings could leak into ClusterApplierService#applyChanges + // The warnings below are set (and leak) from an index in this test case + request.setOptions(expectVersionSpecificWarnings(v -> { + v.compatible( + "[index.indexing.slowlog.level] setting was deprecated in Elasticsearch and will be removed in a future release! " + + "See the breaking changes documentation for the next major version."
+ ); + })); + } + client().performRequest(request); + } } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_vector_search.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_vector_search.yml new file mode 100644 index 000000000000..969c4428c7c6 --- /dev/null +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_vector_search.yml @@ -0,0 +1,142 @@ +--- +"Search float indices created in old cluster": + - do: + search: + index: test-float-index + body: + query: + script_score: + query: { "exists": { "field": "bdv" } } + script: + source: | + field(params.field).get().dotProduct(params.query) + params: + query: [4, 5, 6] + field: bdv + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0._score: 27 } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.1._score: 21 } + - match: { hits.hits.2._id: "1" } + - match: { hits.hits.2._score: 15 } + + - do: + search: + index: test-float-index + body: + query: + script_score: + query: { "exists": { "field": "knn" } } + script: + source: | + field(params.field).get().dotProduct(params.query) + params: + query: [4, 5, 6] + field: knn + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0._score: 27 } + - match: { hits.hits.1._id: "5" } + - match: { hits.hits.1._score: 25 } + - match: { hits.hits.2._id: "7" } + - match: { hits.hits.2._score: 23 } + - do: + search: + index: test-float-index + body: + knn: + field: "knn" + query_vector: [4, 5, 6] + k: 3 + num_candidates: 7 + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.1._id: "5" } + - match: { hits.hits.2._id: "2" } + + - do: + search: + index: test-float-index + body: + knn: + field: "knn" + query_vector: [ 4, 5, 6 ] + k: 3 + num_candidates: 6 + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.1._id: "5" } + - match: { hits.hits.2._id: "2" } + +--- +"Search byte indices created in old cluster": + - do: + search: + index: test-byte-index + body: + query: + script_score: + query: { "exists": { "field": "bdv" } } + script: + source: | + field(params.field).get().dotProduct(params.query) + params: + query: [4, 5, 6] + field: bdv + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0._score: 27 } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.1._score: 21 } + - match: { hits.hits.2._id: "1" } + - match: { hits.hits.2._score: 15 } + + - do: + search: + index: test-byte-index + body: + query: + script_score: + query: { "exists": { "field": "knn" } } + script: + source: | + field(params.field).get().dotProduct(params.query) + params: + query: [4, 5, 6] + field: knn + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0._score: 27 } + - match: { hits.hits.1._id: "5" } + - match: { hits.hits.1._score: 25 } + - match: { hits.hits.2._id: "7" } + - match: { hits.hits.2._score: 23 } + + - do: + search: + index: test-byte-index + body: + knn: + field: "knn" + query_vector: [4, 5, 6] + k: 3 + num_candidates: 6 + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.1._id: "5" } + - match: { hits.hits.2._id: "2" } + + - do: + search: + index: test-byte-index + body: + knn: + field: "knn" + query_vector: [4, 5, 6] + k: 3 + num_candidates: 7 + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.1._id: "5" } + - match: { hits.hits.2._id: "2" } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_vector_search.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_vector_search.yml new file mode 
100644 index 000000000000..b471fa56a47a --- /dev/null +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_vector_search.yml @@ -0,0 +1,228 @@ +--- +"Create indexed float vectors and search": + - do: + indices.create: + index: test-float-index + body: + settings: + number_of_shards: "1" + mappings: + properties: + bdv: + type: dense_vector + dims: 3 + knn: + type: dense_vector + dims: 3 + index: true + similarity: l2_norm + - do: + bulk: + index: test-float-index + refresh: true + body: + - '{"index": {"_id": "1"}}' + - '{"bdv": [1, 1, 1], "knn": [1, 1, 1]}' + - '{"index": {"_id": "2"}}' + - '{"bdv": [1, 1, 2], "knn": [1, 1, 2]}' + - '{"index": {"_id": "3"}}' + - '{"bdv": [1, 1, 3], "knn": [1, 1, 3]}' + - '{"index": {"_id": "4"}}' + - '{"knn": [1, 2, 1]}' + - '{"index": {"_id": "5"}}' + - '{"knn": [1, 3, 1]}' + - '{"index": {"_id": "6"}}' + - '{"knn": [2, 1, 1]}' + - '{"index": {"_id": "7"}}' + - '{"knn": [3, 1, 1]}' + - '{"index": {"_id": "missing_vector"}}' + - '{}' + - do: + indices.forcemerge: + index: test-float-index + max_num_segments: 1 + - do: + search: + index: test-float-index + body: + query: + script_score: + query: { "exists": { "field": "bdv" } } + script: + source: | + field(params.field).get().dotProduct(params.query) + params: + query: [4, 5, 6] + field: bdv + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0._score: 27 } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.1._score: 21 } + - match: { hits.hits.2._id: "1" } + - match: { hits.hits.2._score: 15 } + + - do: + search: + index: test-float-index + body: + query: + script_score: + query: { "exists": { "field": "knn" } } + script: + source: | + field(params.field).get().dotProduct(params.query) + params: + query: [4, 5, 6] + field: knn + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0._score: 27 } + - match: { hits.hits.1._id: "5" } + - match: { hits.hits.1._score: 25 } + - match: { hits.hits.2._id: "7" } + - match: { hits.hits.2._score: 23 } + + - do: + search: + index: test-float-index + body: + knn: + field: "knn" + query_vector: [4, 5, 6] + k: 3 + num_candidates: 7 + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.1._id: "5" } + - match: { hits.hits.2._id: "2" } + + - do: + search: + index: test-float-index + body: + knn: + field: "knn" + query_vector: [ 4, 5, 6 ] + k: 3 + num_candidates: 6 + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.1._id: "5" } + - match: { hits.hits.2._id: "2" } + + +--- +"Create indexed byte vectors and search": + - do: + indices.create: + index: test-byte-index + body: + settings: + number_of_shards: "1" + mappings: + properties: + bdv: + type: dense_vector + element_type: byte + dims: 3 + knn: + type: dense_vector + element_type: byte + dims: 3 + index: true + similarity: l2_norm + - do: + bulk: + index: test-byte-index + refresh: true + body: + - '{"index": {"_id": "1"}}' + - '{"bdv": [1, 1, 1], "knn": [1, 1, 1]}' + - '{"index": {"_id": "2"}}' + - '{"bdv": [1, 1, 2], "knn": [1, 1, 2]}' + - '{"index": {"_id": "3"}}' + - '{"bdv": [1, 1, 3], "knn": [1, 1, 3]}' + - '{"index": {"_id": "4"}}' + - '{"knn": [1, 2, 1]}' + - '{"index": {"_id": "5"}}' + - '{"knn": [1, 3, 1]}' + - '{"index": {"_id": "6"}}' + - '{"knn": [2, 1, 1]}' + - '{"index": {"_id": "7"}}' + - '{"knn": [3, 1, 1]}' + - '{"index": {"_id": "missing_vector"}}' + - '{}' + - do: + indices.forcemerge: + index: test-byte-index + max_num_segments: 1 + - do: + search: + index: test-byte-index + body: + query: + script_score: + query: { "exists": { 
"field": "bdv" } } + script: + source: | + field(params.field).get().dotProduct(params.query) + params: + query: [4, 5, 6] + field: bdv + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0._score: 27 } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.1._score: 21 } + - match: { hits.hits.2._id: "1" } + - match: { hits.hits.2._score: 15 } + + - do: + search: + index: test-byte-index + body: + query: + script_score: + query: { "exists": { "field": "knn" } } + script: + source: | + field(params.field).get().dotProduct(params.query) + params: + query: [4, 5, 6] + field: knn + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0._score: 27 } + - match: { hits.hits.1._id: "5" } + - match: { hits.hits.1._score: 25 } + - match: { hits.hits.2._id: "7" } + - match: { hits.hits.2._score: 23 } + + - do: + search: + index: test-byte-index + body: + knn: + field: "knn" + query_vector: [4, 5, 6] + k: 3 + num_candidates: 6 + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.1._id: "5" } + - match: { hits.hits.2._id: "2" } + + - do: + search: + index: test-byte-index + body: + knn: + field: "knn" + query_vector: [4, 5, 6] + k: 3 + num_candidates: 7 + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.1._id: "5" } + - match: { hits.hits.2._id: "2" } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_vector_search.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_vector_search.yml new file mode 100644 index 000000000000..60304a0078ac --- /dev/null +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_vector_search.yml @@ -0,0 +1,143 @@ +--- +"Search float indices created in old cluster": + - do: + search: + index: test-float-index + body: + query: + script_score: + query: { "exists": { "field": "bdv" } } + script: + source: | + field(params.field).get().dotProduct(params.query) + params: + query: [4, 5, 6] + field: bdv + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0._score: 27 } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.1._score: 21 } + - match: { hits.hits.2._id: "1" } + - match: { hits.hits.2._score: 15 } + + - do: + search: + index: test-float-index + body: + query: + script_score: + query: { "exists": { "field": "knn" } } + script: + source: | + field(params.field).get().dotProduct(params.query) + params: + query: [4, 5, 6] + field: knn + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0._score: 27 } + - match: { hits.hits.1._id: "5" } + - match: { hits.hits.1._score: 25 } + - match: { hits.hits.2._id: "7" } + - match: { hits.hits.2._score: 23 } + + - do: + search: + index: test-float-index + body: + knn: + field: "knn" + query_vector: [4, 5, 6] + k: 3 + num_candidates: 7 + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.1._id: "5" } + - match: { hits.hits.2._id: "2" } + + - do: + search: + index: test-float-index + body: + knn: + field: "knn" + query_vector: [ 4, 5, 6 ] + k: 3 + num_candidates: 6 + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.1._id: "5" } + - match: { hits.hits.2._id: "2" } + +--- +"Search byte indices created in old cluster": + - do: + search: + index: test-byte-index + body: + query: + script_score: + query: { "exists": { "field": "bdv" } } + script: + source: | + field(params.field).get().dotProduct(params.query) + params: + query: [4, 5, 6] + field: bdv + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0._score: 27 } + - match: { hits.hits.1._id: "2" } + - 
match: { hits.hits.1._score: 21 } + - match: { hits.hits.2._id: "1" } + - match: { hits.hits.2._score: 15 } + + - do: + search: + index: test-byte-index + body: + query: + script_score: + query: { "exists": { "field": "knn" } } + script: + source: | + field(params.field).get().dotProduct(params.query) + params: + query: [4, 5, 6] + field: knn + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0._score: 27 } + - match: { hits.hits.1._id: "5" } + - match: { hits.hits.1._score: 25 } + - match: { hits.hits.2._id: "7" } + - match: { hits.hits.2._score: 23 } + + - do: + search: + index: test-byte-index + body: + knn: + field: "knn" + query_vector: [4, 5, 6] + k: 3 + num_candidates: 6 + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.1._id: "5" } + - match: { hits.hits.2._id: "2" } + + - do: + search: + index: test-byte-index + body: + knn: + field: "knn" + query_vector: [4, 5, 6] + k: 3 + num_candidates: 7 + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.1._id: "5" } + - match: { hits.hits.2._id: "2" } diff --git a/qa/smoke-test-http/build.gradle b/qa/smoke-test-http/build.gradle index c08194f7db87..6766823dc21e 100644 --- a/qa/smoke-test-http/build.gradle +++ b/qa/smoke-test-http/build.gradle @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.legacy-java-rest-test' //apply plugin: 'elasticsearch.test-with-dependencies' dependencies { diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterStateRestCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterStateRestCancellationIT.java index a0a4642d25ee..0830cc1f6640 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterStateRestCancellationIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterStateRestCancellationIT.java @@ -9,7 +9,7 @@ package org.elasticsearch.http; import org.apache.http.client.methods.HttpGet; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Cancellable; @@ -24,10 +24,11 @@ import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.tasks.TaskInfo; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.ToXContent; import java.util.Collection; import java.util.Collections; +import java.util.Iterator; import java.util.List; import java.util.concurrent.CancellationException; import java.util.function.UnaryOperator; @@ -106,8 +107,8 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.CURRENT; } @Override @@ -116,7 +117,7 @@ public void writeTo(StreamOutput out) { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) { + public Iterator toXContentChunked(ToXContent.Params params) { throw new AssertionError("task should have been cancelled before serializing this custom"); } } diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HealthRestCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HealthRestCancellationIT.java index e0d6a75495de..cd570654b442 100644 --- 
a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HealthRestCancellationIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HealthRestCancellationIT.java @@ -47,7 +47,7 @@ protected Collection> nodePlugins() { } public void testHealthRestCancellation() throws Exception { - runTest(new Request(HttpGet.METHOD_NAME, "/_internal/_health")); + runTest(new Request(HttpGet.METHOD_NAME, "/_health_report")); } private void runTest(Request request) throws Exception { @@ -111,7 +111,7 @@ public String name() { } @Override - public HealthIndicatorResult calculate(boolean verbose, HealthInfo healthInfo) { + public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo) { try { operationBlock.acquire(); } catch (InterruptedException e) { diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestClusterInfoActionCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestClusterInfoActionCancellationIT.java index 0594191ed3c8..43d7630199bb 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestClusterInfoActionCancellationIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestClusterInfoActionCancellationIT.java @@ -26,7 +26,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; +import org.hamcrest.Matchers; import java.util.EnumSet; import java.util.concurrent.CancellationException; @@ -35,11 +35,10 @@ import static org.elasticsearch.action.support.ActionTestUtils.wrapAsRestResponseListener; import static org.elasticsearch.test.TaskAssertions.assertAllCancellableTasksAreCancelled; import static org.elasticsearch.test.TaskAssertions.assertAllTasksHaveFinished; -import static org.elasticsearch.test.TaskAssertions.awaitTaskWithPrefix; +import static org.elasticsearch.test.TaskAssertions.awaitTaskWithPrefixOnMaster; import static org.hamcrest.core.IsEqual.equalTo; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) -@TestLogging(value = "org.elasticsearch.tasks.TaskManager:TRACE,org.elasticsearch.test.TaskAssertions:TRACE", reason = "debugging") public class RestClusterInfoActionCancellationIT extends HttpSmokeTestCase { public void testGetMappingsCancellation() throws Exception { @@ -77,8 +76,18 @@ private void runTest(String actionName, String endpoint) throws Exception { final Cancellable cancellable = getRestClient().performRequestAsync(request, wrapAsRestResponseListener(future)); assertThat(future.isDone(), equalTo(false)); - awaitTaskWithPrefix(actionName); - + awaitTaskWithPrefixOnMaster(actionName); + // To ensure that the task is executing on master, we wait until the first blocked execution of the task registers its cluster state + // observer for further retries. This ensures that a task is not cancelled before we have started its execution, which could result + // in the task being unregistered and the test not being able to find any cancelled tasks. 
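+ // getTimeoutClusterStateListenersSize() counts the timeout cluster state listeners registered on the master's applier service; a value above zero means the blocked task has registered its observer and can be cancelled safely.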
+ assertBusy( + () -> assertThat( + internalCluster().getCurrentMasterNodeInstance(ClusterService.class) + .getClusterApplierService() + .getTimeoutClusterStateListenersSize(), + Matchers.greaterThan(0) + ) + ); cancellable.cancel(); assertAllCancellableTasksAreCancelled(actionName); diff --git a/qa/smoke-test-ingest-disabled/build.gradle b/qa/smoke-test-ingest-disabled/build.gradle index ad539bb515e1..5ffefed52b71 100644 --- a/qa/smoke-test-ingest-disabled/build.gradle +++ b/qa/smoke-test-ingest-disabled/build.gradle @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' dependencies { testImplementation project(':modules:ingest-common') diff --git a/qa/smoke-test-ingest-disabled/src/yamlRestTest/resources/rest-api-spec/test/ingest_mustache/10_ingest_disabled.yml b/qa/smoke-test-ingest-disabled/src/yamlRestTest/resources/rest-api-spec/test/ingest_mustache/10_ingest_disabled.yml index ed3c5f6f9228..0eb64954a45b 100644 --- a/qa/smoke-test-ingest-disabled/src/yamlRestTest/resources/rest-api-spec/test/ingest_mustache/10_ingest_disabled.yml +++ b/qa/smoke-test-ingest-disabled/src/yamlRestTest/resources/rest-api-spec/test/ingest_mustache/10_ingest_disabled.yml @@ -28,7 +28,7 @@ - match: { acknowledged: true } --- -"Test ingest simulate API works fine when node.ingest is set to false": +"Test ingest simulate API fails when node.ingest is set to false": - do: ingest.put_pipeline: id: "my_pipeline" @@ -47,6 +47,7 @@ - match: { acknowledged: true } - do: + catch: /There are no ingest nodes in this cluster, unable to forward request to an ingest node./ ingest.simulate: id: "my_pipeline" body: > @@ -61,11 +62,6 @@ } ] } - - length: { docs: 1 } - - match: { docs.0.doc._source.foo: "bar" } - - match: { docs.0.doc._source.field2: "_value" } - - length: { docs.0.doc._ingest: 1 } - - is_true: docs.0.doc._ingest.timestamp --- "Test index api with pipeline id fails when node.ingest is set to false": diff --git a/qa/smoke-test-ingest-with-all-dependencies/build.gradle b/qa/smoke-test-ingest-with-all-dependencies/build.gradle index 1f043272b417..6181548757c6 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/build.gradle +++ b/qa/smoke-test-ingest-with-all-dependencies/build.gradle @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' dependencies { yamlRestTestImplementation project(':modules:lang-mustache') @@ -24,4 +24,4 @@ tasks.named("yamlRestTestTestingConventions").configure { tasks.named("forbiddenPatterns").configure { exclude '**/*.mmdb' -} \ No newline at end of file +} diff --git a/qa/smoke-test-multinode/build.gradle b/qa/smoke-test-multinode/build.gradle index 37615f938213..acead4425134 100644 --- a/qa/smoke-test-multinode/build.gradle +++ b/qa/smoke-test-multinode/build.gradle @@ -5,11 +5,6 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ - - -import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.internal.info.BuildParams - apply plugin: 'elasticsearch.internal-yaml-rest-test' restResources { @@ -18,22 +13,12 @@ restResources { } } -File repo = file("$buildDir/testclusters/repo") -testClusters.matching { it.name == "yamlRestTest" }.configureEach { - numberOfNodes = 2 - setting 'path.repo', repo.absolutePath -} - -testClusters.configureEach { - setting 'xpack.security.enabled', 'false' - requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") +dependencies { + clusterModules project(":modules:mapper-extras") + clusterModules project(":modules:ingest-common") } tasks.named("yamlRestTest").configure { - doFirst { - project.delete(repo) - repo.mkdirs() - } systemProperty 'tests.rest.blacklist', [ 'cat.templates/10_basic/No templates', 'cat.templates/10_basic/Sort templates', diff --git a/qa/smoke-test-multinode/src/yamlRestTest/java/org/elasticsearch/smoketest/SmokeTestMultiNodeClientYamlTestSuiteIT.java b/qa/smoke-test-multinode/src/yamlRestTest/java/org/elasticsearch/smoketest/SmokeTestMultiNodeClientYamlTestSuiteIT.java index dd5e1a883d51..3368549c1d7a 100644 --- a/qa/smoke-test-multinode/src/yamlRestTest/java/org/elasticsearch/smoketest/SmokeTestMultiNodeClientYamlTestSuiteIT.java +++ b/qa/smoke-test-multinode/src/yamlRestTest/java/org/elasticsearch/smoketest/SmokeTestMultiNodeClientYamlTestSuiteIT.java @@ -13,12 +13,25 @@ import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; import org.apache.lucene.tests.util.TimeUnits; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; @TimeoutSuite(millis = 40 * TimeUnits.MINUTE) // some of the windows test VMs are slow as hell public class SmokeTestMultiNodeClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .nodes(2) + .module("mapper-extras") + .module("ingest-common") + // The first node does not have the ingest role so we're sure ingest requests are forwarded: + .node(0, n -> n.setting("node.roles", "[master,data,ml,remote_cluster_client,transform]")) + .feature(FeatureFlag.TIME_SERIES_MODE) + .build(); + public SmokeTestMultiNodeClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { super(testCandidate); } @@ -27,4 +40,9 @@ public SmokeTestMultiNodeClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandi public static Iterable parameters() throws Exception { return ESClientYamlSuiteTestCase.createParameters(); } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } } diff --git a/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml b/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml index 867f4ad8d5d4..fc078adeda62 100644 --- a/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml +++ b/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/30_desired_balance.yml @@ -64,3 +64,45 @@ setup: - gte: { routing_table.test.1.desired.ignored: 0 } - is_true: 'routing_table.test.1.desired.node_ids' +--- +"Test cluster_balance_stats": 
+ + - skip: + version: " - 8.6.99" + reason: "Field added in 8.7.0" + + - do: + _internal.get_desired_balance: { } + + - is_true: 'cluster_balance_stats' + - is_true: 'cluster_balance_stats.tiers' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count.total' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count.min' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count.max' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count.average' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count.std_dev' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load.total' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load.min' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load.max' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load.average' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load.std_dev' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage.total' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage.min' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage.max' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage.average' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage.std_dev' + - is_true: 'cluster_balance_stats.tiers.data_content.actual_disk_usage' + - is_true: 'cluster_balance_stats.tiers.data_content.actual_disk_usage.total' + - is_true: 'cluster_balance_stats.tiers.data_content.actual_disk_usage.min' + - is_true: 'cluster_balance_stats.tiers.data_content.actual_disk_usage.max' + - is_true: 'cluster_balance_stats.tiers.data_content.actual_disk_usage.average' + - is_true: 'cluster_balance_stats.tiers.data_content.actual_disk_usage.std_dev' + - is_true: 'cluster_balance_stats.nodes' + - is_true: 'cluster_balance_stats.nodes.test-cluster-0' + - gte: { 'cluster_balance_stats.nodes.test-cluster-0.shard_count' : 0 } + - gte: { 'cluster_balance_stats.nodes.test-cluster-0.forecast_write_load': 0.0 } + - gte: { 'cluster_balance_stats.nodes.test-cluster-0.forecast_disk_usage_bytes' : 0 } + - gte: { 'cluster_balance_stats.nodes.test-cluster-0.actual_disk_usage_bytes' : 0 } diff --git a/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/40_simulate.yml b/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/40_simulate.yml new file mode 100644 index 000000000000..eb27b021eb11 --- /dev/null +++ b/qa/smoke-test-multinode/src/yamlRestTest/resources/rest-api-spec/test/smoke_test_multinode/40_simulate.yml @@ -0,0 +1,978 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "my_pipeline" + ignore: 404 + +--- +"Test simulate with stored ingest pipeline": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "set" : { + "field" : "field2", + "value" : "_value" + } + } + ] + } + - match: { acknowledged: true } + + - do: + ingest.simulate: + id: "my_pipeline" + body: > + { + "docs": [ + { + "_index": "index", + "_id": "id", + "_source": { + "foo": "bar" + } + } + ] + } + - length: { docs: 1 } + - match: { docs.0.doc._source.foo: "bar" } + - match: {
docs.0.doc._source.field2: "_value" } + - length: { docs.0.doc._ingest: 1 } + - is_true: docs.0.doc._ingest.timestamp + +--- +"Test simulate with provided pipeline definition": + - do: + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "set" : { + "field" : "field2", + "value" : "_value" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_source": { + "foo": "bar" + } + } + ] + } + - length: { docs: 1 } + +--- +"Test simulate with provided invalid pipeline definition": + - do: + catch: bad_request + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "set" : { + "tag" : "fails", + "value" : "_value" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_source": { + "foo": "bar" + } + } + ] + } + - match: { error.root_cause.0.type: "parse_exception" } + - match: { error.root_cause.0.reason: "[field] required property is missing" } + - match: { error.root_cause.0.processor_tag: "fails" } + - match: { error.root_cause.0.processor_type: "set" } + - match: { error.root_cause.0.property_name: "field" } + +--- +"Test simulate without id": + - do: + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "set" : { + "field" : "field2", + "value" : "_value" + } + } + ] + }, + "docs": [ + { + "_source": { + "foo": "bar" + } + } + ] + } + - length: { docs: 1 } + +--- +"Test simulate with provided pipeline definition with on_failure block": + - do: + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "rename" : { + "field" : "does_not_exist", + "target_field" : "field2", + "on_failure" : [ + { + "set" : { + "field" : "field2", + "value" : "_value" + } + } + ] + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_source": { + "foo": "bar" + } + } + ] + } + - length: { docs: 1 } + - match: { docs.0.doc._source.foo: "bar" } + - match: { docs.0.doc._source.field2: "_value" } + - length: { docs.0.doc._ingest: 1 } + - is_true: docs.0.doc._ingest.timestamp + +--- +"Test simulate with no provided pipeline or pipeline_id": + - do: + catch: bad_request + ingest.simulate: + body: > + { + "docs": [ + { + "_index": "index", + "_id": "id", + "_source": { + "foo": "bar" + } + } + ] + } + - is_false: error.root_cause.0.processor_type + - is_false: error.root_cause.0.processor_tag + - match: { error.root_cause.0.property_name: "pipeline" } + - match: { error.reason: "[pipeline] required property is missing" } + +--- +"Test simulate with invalid processor config": + - do: + catch: bad_request + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "set" : { + "field" : "field2" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_source": { + "foo": "bar" + } + } + ] + } + - match: { error.root_cause.0.type: "parse_exception" } + - match: { error.root_cause.0.reason: "[value] required property is missing" } + - match: { error.root_cause.0.processor_type: "set" } + - match: { error.root_cause.0.property_name: "value" } + - is_false: error.root_cause.0.processor_tag + +--- +"Test simulate with verbose flag": + - do: + ingest.simulate: + verbose: true + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "set" : { + "tag" : "processor[set]-0", + "field" : "field2.value", + "value" : "_value" + } + }, + { + "set" : { + "field" : "field3", + "value" : 
"third_val" + } + }, + { + "uppercase" : { + "field" : "field2.value" + } + }, + { + "lowercase" : { + "field" : "foo.bar.0.item" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_source": { + "foo": { + "bar" : [ {"item": "HELLO"} ] + } + } + } + ] + } + - length: { docs: 1 } + - length: { docs.0.processor_results: 4 } + - match: { docs.0.processor_results.0.tag: "processor[set]-0" } + - length: { docs.0.processor_results.0.doc._source: 2 } + - match: { docs.0.processor_results.0.doc._source.foo.bar.0.item: "HELLO" } + - match: { docs.0.processor_results.0.doc._source.field2.value: "_value" } + - length: { docs.0.processor_results.0.doc._ingest: 2 } + - is_true: docs.0.processor_results.0.doc._ingest.timestamp + - is_true: docs.0.processor_results.0.doc._ingest.pipeline + - length: { docs.0.processor_results.1.doc._source: 3 } + - match: { docs.0.processor_results.1.doc._source.foo.bar.0.item: "HELLO" } + - match: { docs.0.processor_results.1.doc._source.field2.value: "_value" } + - match: { docs.0.processor_results.1.doc._source.field3: "third_val" } + - length: { docs.0.processor_results.1.doc._ingest: 2 } + - is_true: docs.0.processor_results.1.doc._ingest.timestamp + - is_true: docs.0.processor_results.1.doc._ingest.pipeline + - length: { docs.0.processor_results.2.doc._source: 3 } + - match: { docs.0.processor_results.2.doc._source.foo.bar.0.item: "HELLO" } + - match: { docs.0.processor_results.2.doc._source.field2.value: "_VALUE" } + - match: { docs.0.processor_results.2.doc._source.field3: "third_val" } + - length: { docs.0.processor_results.2.doc._ingest: 2 } + - is_true: docs.0.processor_results.2.doc._ingest.timestamp + - is_true: docs.0.processor_results.2.doc._ingest.pipeline + - length: { docs.0.processor_results.3.doc._source: 3 } + - match: { docs.0.processor_results.3.doc._source.foo.bar.0.item: "hello" } + - match: { docs.0.processor_results.3.doc._source.field2.value: "_VALUE" } + - match: { docs.0.processor_results.3.doc._source.field3: "third_val" } + - length: { docs.0.processor_results.3.doc._ingest: 2 } + - is_true: docs.0.processor_results.3.doc._ingest.timestamp + - is_true: docs.0.processor_results.3.doc._ingest.pipeline + +--- +"Test simulate with exception thrown": + - do: + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "uppercase" : { + "field" : "foo" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_source": { + "not_foo": "bar" + } + }, + { + "_index": "index", + "_id": "id2", + "_source": { + "foo": "bar" + } + } + ] + } + - length: { docs: 2 } + - match: { docs.0.error.type: "illegal_argument_exception" } + - match: { docs.1.doc._source.foo: "BAR" } + - length: { docs.1.doc._ingest: 1 } + - is_true: docs.1.doc._ingest.timestamp + +--- +"Test verbose simulate with exception thrown": + - do: + ingest.simulate: + verbose: true + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "convert" : { + "field" : "foo", + "type" : "integer" + } + }, + { + "uppercase" : { + "field" : "bar" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_source": { + "foo": "bar", + "bar": "hello" + } + }, + { + "_index": "index", + "_id": "id2", + "_source": { + "foo": "5", + "bar": "hello" + } + } + ] + } + - length: { docs: 2 } + - length: { docs.0.processor_results: 1 } + - match: { docs.0.processor_results.0.error.type: "illegal_argument_exception" } + - length: { docs.1.processor_results: 2 } + - match: { 
docs.1.processor_results.0.doc._index: "index" } + - match: { docs.1.processor_results.0.doc._source.foo: 5 } + - match: { docs.1.processor_results.0.doc._source.bar: "hello" } + - length: { docs.1.processor_results.0.doc._ingest: 2 } + - is_true: docs.1.processor_results.0.doc._ingest.timestamp + - is_true: docs.1.processor_results.0.doc._ingest.pipeline + - match: { docs.1.processor_results.1.doc._source.foo: 5 } + - match: { docs.1.processor_results.1.doc._source.bar: "HELLO" } + - length: { docs.1.processor_results.1.doc._ingest: 2 } + - is_true: docs.1.processor_results.1.doc._ingest.timestamp + - is_true: docs.1.processor_results.1.doc._ingest.pipeline + +--- +"Test verbose simulate with error in pipeline": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "rename" : { + "field" : "does_not_exist", + "target_field" : "_value" + } + } + ] + } + - match: { acknowledged: true } + + - do: + ingest.simulate: + verbose: true + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "pipeline" : { + "name" : "my_pipeline" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_source": { + "foo": "bar", + "bar": "hello" + } + } + ] + } + - length: { docs: 1 } + - length: { docs.0.processor_results: 2 } + - match: { docs.0.processor_results.0.processor_type: "pipeline" } + - match: { docs.0.processor_results.0.status: "success" } + - match: { docs.0.processor_results.1.processor_type: "rename" } + - match: { docs.0.processor_results.1.status: "error" } + - match: { docs.0.processor_results.1.error.root_cause.0.type: "illegal_argument_exception" } + - match: { docs.0.processor_results.1.error.root_cause.0.reason: "field [does_not_exist] doesn't exist" } + - match: { docs.0.processor_results.1.error.type: "illegal_argument_exception" } + - match: { docs.0.processor_results.1.error.reason: "field [does_not_exist] doesn't exist" } + +--- +"Test verbose simulate with on_failure": + - do: + ingest.simulate: + verbose: true + body: > + { + "pipeline" : { + "description": "_description", + "processors": [ + { + "set" : { + "tag" : "setstatus-1", + "field" : "status", + "value" : 200 + } + }, + { + "rename" : { + "tag" : "rename-1", + "field" : "foofield", + "target_field" : "field1", + "on_failure" : [ + { + "set" : { + "tag" : "set on_failure rename", + "field" : "foofield", + "value" : "exists" + } + }, + { + "rename" : { + "field" : "foofield2", + "target_field" : "field1", + "on_failure" : [ + { + "set" : { + "field" : "foofield2", + "value" : "ran" + } + } + ] + } + } + ] + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_source": { + "field1": "123.42 400 " + } + } + ] + } + - length: { docs: 1 } + - length: { docs.0.processor_results: 5 } + - match: { docs.0.processor_results.0.tag: "setstatus-1" } + - match: { docs.0.processor_results.0.doc._source.field1: "123.42 400 " } + - match: { docs.0.processor_results.0.doc._source.status: 200 } + - match: { docs.0.processor_results.1.tag: "rename-1" } + - match: { docs.0.processor_results.1.error.type: "illegal_argument_exception" } + - match: { docs.0.processor_results.1.error.reason: "field [foofield] doesn't exist" } + - match: { docs.0.processor_results.2.tag: "set on_failure rename" } + - is_false: docs.0.processor_results.3.tag + - is_false: docs.0.processor_results.4.tag + - match: { docs.0.processor_results.4.doc._source.foofield: "exists" } + - match: { docs.0.processor_results.4.doc._source.foofield2: 
"ran" } + - match: { docs.0.processor_results.4.doc._source.field1: "123.42 400 " } + - match: { docs.0.processor_results.4.doc._source.status: 200 } + +--- +"Test verbose simulate with ignore_failure and thrown exception": + - do: + ingest.simulate: + verbose: true + body: > + { + "pipeline" : { + "description": "_description", + "processors": [ + { + "set" : { + "tag" : "setstatus-1", + "field" : "status", + "value" : 200 + } + }, + { + "rename" : { + "tag" : "rename-1", + "field" : "foofield", + "target_field" : "field1", + "ignore_failure": true, + "on_failure" : [ + { + "set" : { + "tag" : "set on_failure rename", + "field" : "foofield", + "value" : "exists" + } + }, + { + "rename" : { + "field" : "foofield2", + "target_field" : "field1", + "on_failure" : [ + { + "set" : { + "field" : "foofield2", + "value" : "ran" + } + } + ] + } + } + ] + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_source": { + "field1": "123.42 400 " + } + } + ] + } + - length: { docs: 1 } + - length: { docs.0.processor_results: 2 } + - match: { docs.0.processor_results.0.tag: "setstatus-1" } + - match: { docs.0.processor_results.0.doc._source.field1: "123.42 400 " } + - match: { docs.0.processor_results.0.doc._source.status: 200 } + - match: { docs.0.processor_results.0.status: "success" } + - match: { docs.0.processor_results.0.processor_type: "set" } + - match: { docs.0.processor_results.1.tag: "rename-1" } + - match: { docs.0.processor_results.1.ignored_error.error.type: "illegal_argument_exception" } + - match: { docs.0.processor_results.1.ignored_error.error.reason: "field [foofield] doesn't exist" } + - match: { docs.0.processor_results.1.doc._source.field1: "123.42 400 " } + - match: { docs.0.processor_results.1.doc._source.status: 200 } + - match: { docs.0.processor_results.1.status: "error_ignored" } + - match: { docs.0.processor_results.1.processor_type: "rename" } + +--- +"Test verbose simulate with ignore_failure and no exception thrown": + - do: + ingest.simulate: + verbose: true + body: > + { + "pipeline" : { + "description": "_description", + "processors": [ + { + "set" : { + "tag" : "setstatus-1", + "field" : "status", + "value" : 200 + } + }, + { + "rename" : { + "tag" : "rename-1", + "field" : "status", + "target_field" : "new_status", + "ignore_failure": true + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_source": { + "field1": "123.42 400 " + } + } + ] + } + - length: { docs: 1 } + - length: { docs.0.processor_results: 2 } + - length: { docs.0.processor_results.0: 4 } + - match: { docs.0.processor_results.0.tag: "setstatus-1" } + - match: { docs.0.processor_results.0.status: "success" } + - match: { docs.0.processor_results.0.processor_type: "set" } + - match: { docs.0.processor_results.0.doc._source.field1: "123.42 400 " } + - match: { docs.0.processor_results.0.doc._source.status: 200 } + - length: { docs.0.processor_results.1: 4 } + - match: { docs.0.processor_results.1.tag: "rename-1" } + - match: { docs.0.processor_results.1.status: "success" } + - match: { docs.0.processor_results.1.processor_type: "rename" } + - match: { docs.0.processor_results.1.doc._source.new_status: 200 } + +--- +"Test verbose simulate with Pipeline Processor with Circular Pipelines": + - do: + ingest.put_pipeline: + id: "outer" + body: > + { + "description" : "outer pipeline", + "processors" : [ + { + "pipeline" : { + "name": "inner" + } + } + ] + } + - match: { acknowledged: true } + + - do: + ingest.put_pipeline: + id: "inner" + body: > + { + "description" : 
"inner pipeline", + "processors" : [ + { + "pipeline" : { + "name": "outer" + } + } + ] + } + - match: { acknowledged: true } + + - do: + ingest.simulate: + verbose: true + body: > + { + "pipeline": { + "processors" : [ + { + "pipeline" : { + "name": "outer" + } + } + ] + } + , + "docs": [ + { + "_index": "index", + "_id": "id", + "_source": { + "field1": "123.42 400 " + } + } + ] + } + - length: { docs: 1 } + - length: { docs.0.processor_results: 1 } + - match: { docs.0.processor_results.0.error.reason: "Cycle detected for pipeline: outer" } + +--- +"Test verbose simulate with Pipeline Processor with Multiple Pipelines": + - do: + ingest.put_pipeline: + id: "pipeline1" + body: > + { + "processors": [ + { + "set": { + "field": "pipeline1", + "value": true + } + }, + { + "pipeline": { + "name": "pipeline2" + } + } + ] + } + - match: { acknowledged: true } + + - do: + ingest.put_pipeline: + id: "pipeline2" + body: > + { + "processors": [ + { + "set": { + "field": "pipeline2", + "value": true + } + } + ] + } + - match: { acknowledged: true } + + - do: + ingest.simulate: + verbose: true + body: > + { + "pipeline": { + "processors": [ + { + "set": { + "field": "pipeline0", + "value": true, + "description" : "first_set" + } + }, + { + "pipeline": { + "name": "pipeline1" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_source": { + "field1": "123.42 400 " + } + } + ] + } + - length: { docs: 1 } + - length: { docs.0.processor_results: 5 } + - match: { docs.0.processor_results.0.doc._source.pipeline0: true } + - match: { docs.0.processor_results.0.status: "success" } + - match: { docs.0.processor_results.0.processor_type: "set" } + - match: { docs.0.processor_results.0.description: "first_set" } + - is_false: docs.0.processor_results.0.doc._source.pipeline1 + - is_false: docs.0.processor_results.0.doc._source.pipeline2 + - match: { docs.0.processor_results.1.doc: null } + - match: { docs.0.processor_results.1.status: "success" } + - match: { docs.0.processor_results.1.processor_type: "pipeline" } + - match: { docs.0.processor_results.2.doc._source.pipeline0: true } + - match: { docs.0.processor_results.2.doc._source.pipeline1: true } + - is_false: docs.0.processor_results.2.doc._source.pipeline2 + - match: { docs.0.processor_results.3.doc: null } + - match: { docs.0.processor_results.3.status: "success" } + - match: { docs.0.processor_results.3.processor_type: "pipeline" } + - match: { docs.0.processor_results.4.doc._source.pipeline0: true } + - match: { docs.0.processor_results.4.doc._source.pipeline1: true } + - match: { docs.0.processor_results.4.doc._source.pipeline2: true } + +--- +"Test verbose simulate with true conditional and on failure": + - do: + ingest.simulate: + verbose: true + body: > + { + "pipeline": { + "processors": [ + { + "rename": { + "tag": "gunna_fail", + "if": "true", + "field": "foo1", + "target_field": "fieldA", + "on_failure": [ + { + "set": { + "field": "failed1", + "value": "failed1", + "tag": "failed1" + } + }, + { + "rename": { + "tag": "gunna_fail_again", + "if": "true", + "field": "foo2", + "target_field": "fieldA", + "on_failure": [ + { + "set": { + "field": "failed2", + "value": "failed2", + "tag": "failed2" + } + } + ] + } + } + ] + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_source": { + "foo": "bar" + } + } + ] + } + - length: { docs: 1 } + - length: { docs.0.processor_results: 4 } + - match: { docs.0.processor_results.0.tag: "gunna_fail" } + - match: { docs.0.processor_results.0.error.reason: "field [foo1] 
doesn't exist" } + - match: { docs.0.processor_results.0.status: "error" } + - match: { docs.0.processor_results.0.processor_type: "rename" } + - match: { docs.0.processor_results.1.tag: "failed1" } + - match: { docs.0.processor_results.1.doc._source.failed1: "failed1" } + - match: { docs.0.processor_results.1.doc._ingest.on_failure_processor_tag: "gunna_fail" } + - match: { docs.0.processor_results.1.status: "success" } + - match: { docs.0.processor_results.1.processor_type: "set" } + - match: { docs.0.processor_results.2.tag: "gunna_fail_again" } + - match: { docs.0.processor_results.2.error.reason: "field [foo2] doesn't exist" } + - match: { docs.0.processor_results.2.status: "error" } + - match: { docs.0.processor_results.2.processor_type: "rename" } + - match: { docs.0.processor_results.3.tag: "failed2" } + - match: { docs.0.processor_results.3.doc._source.failed1: "failed1" } + - match: { docs.0.processor_results.3.doc._source.failed2: "failed2" } + - match: { docs.0.processor_results.3.doc._ingest.on_failure_processor_tag: "gunna_fail_again" } + - match: { docs.0.processor_results.3.status: "success" } + - match: { docs.0.processor_results.3.processor_type: "set" } + + +--- +"Test simulate with pipeline with conditional and skipped and dropped": + - do: + ingest.simulate: + verbose: true + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "set" : { + "description": "processor_description", + "tag": "processor_tag", + "field" : "field2", + "value" : "_value" + } + }, + { + "drop" : { + "if": "false" + } + }, + { + "drop" : { + "if": "true" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_source": { + "foo": "bar" + } + } + ] + } + - length: { docs: 1 } + - length: { docs.0.processor_results: 3 } + - match: { docs.0.processor_results.0.doc._source.field2: "_value" } + - match: { docs.0.processor_results.0.description: "processor_description" } + - match: { docs.0.processor_results.0.tag: "processor_tag" } + - match: { docs.0.processor_results.0.status: "success" } + - match: { docs.0.processor_results.0.processor_type: "set" } + - match: { docs.0.processor_results.1.status: "skipped" } + - match: { docs.0.processor_results.1.processor_type: "drop" } + - match: { docs.0.processor_results.1.if.condition: "false" } + - match: { docs.0.processor_results.1.if.result: false } + - match: { docs.0.processor_results.2.status: "dropped" } + - match: { docs.0.processor_results.2.processor_type: "drop" } + - match: { docs.0.processor_results.2.if.condition: "true" } + - match: { docs.0.processor_results.2.if.result: true } +--- +"Test simulate with provided pipeline that does not exist": + - do: + ingest.simulate: + verbose: true + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "pipeline": { + "name": "____pipeline_doesnot_exist___" + } + } + ] + }, + "docs": [ + { + "_source": {} + } + ] + } + - match: { docs.0.processor_results.0.status: "error" } + - match: { docs.0.processor_results.0.error.root_cause.0.type: "illegal_argument_exception" } + - match: { docs.0.processor_results.0.error.root_cause.0.reason: "Pipeline processor configured for non-existent pipeline [____pipeline_doesnot_exist___]" } diff --git a/qa/smoke-test-plugins/build.gradle b/qa/smoke-test-plugins/build.gradle index 06a9d9a6613a..7ae46b8e0d68 100644 --- a/qa/smoke-test-plugins/build.gradle +++ b/qa/smoke-test-plugins/build.gradle @@ -9,7 +9,7 @@ import org.apache.tools.ant.filters.ReplaceTokens import 
org.elasticsearch.gradle.internal.info.BuildParams -apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.legacy-yaml-rest-test' ext.pluginPaths = [] project(':plugins').getChildProjects().each { pluginName, pluginProject -> diff --git a/qa/stable-api/build.gradle b/qa/stable-api/build.gradle new file mode 100644 index 000000000000..393c271967c6 --- /dev/null +++ b/qa/stable-api/build.gradle @@ -0,0 +1,6 @@ +subprojects { + apply plugin: 'elasticsearch.java' + apply plugin: 'elasticsearch.bwc-test' + + group = 'org.elasticsearch.qa.stable-api' +} diff --git a/qa/stable-api/logging/build.gradle b/qa/stable-api/logging/build.gradle new file mode 100644 index 000000000000..ada0dc1d169b --- /dev/null +++ b/qa/stable-api/logging/build.gradle @@ -0,0 +1,4 @@ +ext.stableApiSince = "8.7.0" + +apply plugin: 'elasticsearch.stable-api' + diff --git a/qa/stable-api/plugin-analysis-api/build.gradle b/qa/stable-api/plugin-analysis-api/build.gradle new file mode 100644 index 000000000000..c3fdc92c36bb --- /dev/null +++ b/qa/stable-api/plugin-analysis-api/build.gradle @@ -0,0 +1,3 @@ +ext.stableApiSince = "8.7.0" + +apply plugin: 'elasticsearch.stable-api' diff --git a/qa/stable-api/plugin-api/build.gradle b/qa/stable-api/plugin-api/build.gradle new file mode 100644 index 000000000000..c3fdc92c36bb --- /dev/null +++ b/qa/stable-api/plugin-api/build.gradle @@ -0,0 +1,3 @@ +ext.stableApiSince = "8.7.0" + +apply plugin: 'elasticsearch.stable-api' diff --git a/qa/system-indices/build.gradle b/qa/system-indices/build.gradle index a8be8e8374d5..4cb60860cca4 100644 --- a/qa/system-indices/build.gradle +++ b/qa/system-indices/build.gradle @@ -7,7 +7,7 @@ */ apply plugin: 'elasticsearch.base-internal-es-plugin' -apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.legacy-java-rest-test' esplugin { name 'system-indices-qa' diff --git a/qa/unconfigured-node-name/build.gradle b/qa/unconfigured-node-name/build.gradle index aae98def1db8..dd1dd0a2334f 100644 --- a/qa/unconfigured-node-name/build.gradle +++ b/qa/unconfigured-node-name/build.gradle @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.legacy-java-rest-test' testClusters.configureEach { setting 'xpack.security.enabled', 'false' diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index e92d6767a803..e5049a117905 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -33,9 +33,8 @@ artifacts { restTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) } -testClusters.configureEach { - module ':modules:mapper-extras' - requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") +dependencies { + clusterModules project(":modules:mapper-extras") } tasks.named("yamlRestTestV7CompatTransform").configure { task -> diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/_internal.health.json b/rest-api-spec/src/main/resources/rest-api-spec/api/_internal.health.json deleted file mode 100644 index 4528c33b0853..000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/_internal.health.json +++ /dev/null @@ -1,46 +0,0 @@ -{ - "_internal.health":{ - "documentation":{ - "url": null, - "description":"Returns the health of the cluster." 
- }, - "stability":"experimental", - "visibility":"private", - "headers":{ - "accept": [ "application/json"] - }, - "url":{ - "paths":[ - { - "path":"/_internal/_health", - "methods":[ - "GET" - ] - }, - { - "path":"/_internal/_health/{feature}", - "methods":[ - "GET" - ], - "parts":{ - "feature":{ - "type":"string", - "description":"A feature of the cluster, as returned by the top-level health API" - } - } - } - ] - }, - "params":{ - "timeout":{ - "type":"time", - "description":"Explicit operation timeout" - }, - "verbose":{ - "type":"boolean", - "description":"Opt in for more information about the health of the system", - "default":true - } - } - } -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/_internal.prevalidate_node_removal.json b/rest-api-spec/src/main/resources/rest-api-spec/api/_internal.prevalidate_node_removal.json index 8563c17ceb53..8c945f2894f2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/_internal.prevalidate_node_removal.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/_internal.prevalidate_node_removal.json @@ -35,6 +35,10 @@ "master_timeout":{ "type":"time", "description":"Explicit operation timeout for connection to master node" + }, + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/health_report.json b/rest-api-spec/src/main/resources/rest-api-spec/api/health_report.json new file mode 100644 index 000000000000..2cc9b8f00831 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/health_report.json @@ -0,0 +1,53 @@ +{ + "health_report": { + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/health-api.html", + "description": "Returns the health of the cluster." 
+ }, + "stability": "stable", + "visibility": "public", + "headers": { + "accept": [ + "application/json" + ] + }, + "url": { + "paths": [ + { + "path": "/_health_report", + "methods": [ + "GET" + ] + }, + { + "path": "/_health_report/{feature}", + "methods": [ + "GET" + ], + "parts": { + "feature": { + "type": "string", + "description": "A feature of the cluster, as returned by the top-level health API" + } + } + } + ] + }, + "params":{ + "timeout":{ + "type":"time", + "description":"Explicit operation timeout" + }, + "verbose":{ + "type":"boolean", + "description":"Opt in for more information about the health of the system", + "default":true + }, + "size": { + "type": "int", + "description": "Limit the number of affected resources the health API returns", + "default": 1000 + } + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.clear_trained_model_deployment_cache.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.clear_trained_model_deployment_cache.json index ee92ab44ab8f..81f396a30b36 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.clear_trained_model_deployment_cache.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.clear_trained_model_deployment_cache.json @@ -4,7 +4,7 @@ "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/clear-trained-model-deployment-cache.html", "description":"Clear the cached results from a trained model deployment" }, - "stability":"beta", + "stability":"stable", "visibility":"public", "headers":{ "accept": [ "application/json"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.infer_trained_model.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.infer_trained_model.json index 539d46eb88dd..6041155b1ea6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.infer_trained_model.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.infer_trained_model.json @@ -4,7 +4,7 @@ "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/infer-trained-model.html", "description":"Evaluate a trained model." 
}, - "stability":"beta", + "stability":"stable", "visibility":"public", "headers":{ "accept": [ "application/json"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.put_trained_model_definition_part.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.put_trained_model_definition_part.json index 09e34a5c9eea..bf826f36e93c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.put_trained_model_definition_part.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.put_trained_model_definition_part.json @@ -4,7 +4,7 @@ "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-model-definition-part.html", "description":"Creates part of a trained model definition" }, - "stability":"beta", + "stability":"stable", "visibility":"public", "headers":{ "accept": [ "application/json"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.put_trained_model_vocabulary.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.put_trained_model_vocabulary.json index 1d1c6005b369..f6fd70a75c00 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.put_trained_model_vocabulary.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.put_trained_model_vocabulary.json @@ -4,7 +4,7 @@ "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-model-vocabulary.html", "description":"Creates a trained model vocabulary" }, - "stability":"beta", + "stability":"stable", "visibility":"public", "headers":{ "accept": [ "application/json"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.start_trained_model_deployment.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.start_trained_model_deployment.json index db3418759898..8f98bc03670a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.start_trained_model_deployment.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.start_trained_model_deployment.json @@ -4,7 +4,7 @@ "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/start-trained-model-deployment.html", "description":"Start a trained model deployment." }, - "stability":"beta", + "stability":"stable", "visibility":"public", "headers":{ "accept": [ "application/json"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.stop_trained_model_deployment.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.stop_trained_model_deployment.json index 5cb9d5764067..016d88c684e4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.stop_trained_model_deployment.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.stop_trained_model_deployment.json @@ -4,7 +4,7 @@ "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/stop-trained-model-deployment.html", "description":"Stop a trained model deployment." 
}, - "stability":"beta", + "stability":"stable", "visibility":"public", "headers":{ "accept": [ "application/json"], diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.update_trained_model_deployment.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.update_trained_model_deployment.json index df9ab6cd5e73..0d824ece4c8b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.update_trained_model_deployment.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.update_trained_model_deployment.json @@ -1,7 +1,7 @@ { "ml.update_trained_model_deployment":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-trained-model-deployment.html", + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/update-trained-model-deployment.html", "description":"Updates certain properties of trained model deployment." }, "stability":"beta", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/semantic_search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/semantic_search.json deleted file mode 100644 index 3af11e258227..000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/semantic_search.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "semantic_search":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/semantic-search.html", - "description":"Semantic search API using dense vector similarity" - }, - "stability":"experimental", - "visibility":"public", - "headers":{ - "accept": [ "application/json"], - "content_type": ["application/json"] - }, - "url":{ - "paths":[ - { - "path":"/{index}/_semantic_search", - "methods":[ - "GET", - "POST" - ], - "parts":{ - "index":{ - "type":"list", - "description":"A comma-separated list of index names to search; use `_all` to perform the operation on all indices" - } - } - } - ] - }, - "params": { - "routing":{ - "type":"list", - "description":"A comma-separated list of specific routing values" - } - }, - "body":{ - "description":"The search definition" - } - } -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/transform.get_transform_stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/transform.get_transform_stats.json index f425c41f0997..8139fac7a818 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/transform.get_transform_stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/transform.get_transform_stats.json @@ -36,6 +36,11 @@ "required":false, "description":"specifies a max number of transform stats to get, defaults to 100" }, + "timeout":{ + "type":"time", + "required":false, + "description":"Controls the time to wait for the stats" + }, "allow_no_match":{ "type":"boolean", "required":false, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/transform.schedule_now_transform.json b/rest-api-spec/src/main/resources/rest-api-spec/api/transform.schedule_now_transform.json new file mode 100644 index 000000000000..81ba9e071cfd --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/transform.schedule_now_transform.json @@ -0,0 +1,38 @@ +{ + "transform.schedule_now_transform":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/schedule-now-transform.html", + "description":"Schedules now a transform." 
+ }, + "stability":"stable", + "visibility":"public", + "headers":{ + "accept":[ "application/json"], + "content_type":["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_transform/{transform_id}/_schedule_now", + "methods":[ + "POST" + ], + "parts":{ + "transform_id":{ + "type":"string", + "required":true, + "description":"The id of the transform." + } + } + } + ] + }, + "params":{ + "timeout":{ + "type":"time", + "required":false, + "description":"Controls the time to wait for the scheduling to take place" + } + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/transform.start_transform.json b/rest-api-spec/src/main/resources/rest-api-spec/api/transform.start_transform.json index 7c002957ea67..b7fb849987af 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/transform.start_transform.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/transform.start_transform.json @@ -26,6 +26,11 @@ ] }, "params":{ + "from":{ + "type":"string", + "required":false, + "description":"Restricts the set of transformed entities to those changed after this time" + }, "timeout":{ "type":"time", "required":false, diff --git a/rest-api-spec/src/yamlRestTest/java/org/elasticsearch/test/rest/ClientYamlTestSuiteIT.java b/rest-api-spec/src/yamlRestTest/java/org/elasticsearch/test/rest/ClientYamlTestSuiteIT.java index 287c8a57627b..a10d53389af9 100644 --- a/rest-api-spec/src/yamlRestTest/java/org/elasticsearch/test/rest/ClientYamlTestSuiteIT.java +++ b/rest-api-spec/src/yamlRestTest/java/org/elasticsearch/test/rest/ClientYamlTestSuiteIT.java @@ -13,8 +13,11 @@ import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; import org.apache.lucene.tests.util.TimeUnits; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; /** Rest integration test. 
Runs against a cluster started by {@code gradle integTest} */ @@ -22,6 +25,12 @@ @TimeoutSuite(millis = 40 * TimeUnits.MINUTE) public class ClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .module("mapper-extras") + .feature(FeatureFlag.TIME_SERIES_MODE) + .build(); + public ClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { super(testCandidate); } @@ -30,4 +39,9 @@ public ClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate public static Iterable parameters() throws Exception { return createParameters(); } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/11_dynamic_templates.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/11_dynamic_templates.yml index ef904b341deb..0784cffe3b84 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/11_dynamic_templates.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/11_dynamic_templates.yml @@ -24,20 +24,20 @@ bulk: refresh: true body: - - index: + - create: _index: test_index _id: id_1 dynamic_templates: location: location - - { "location": [ -71.34, 41.12 ]} + - { "location": "41.12,-71.34"} - index: _index: test_index _id: id_2 dynamic_templates: location: location - - { "location": "41.12,-71.34"} + - { "location": [ -71.34, 41.12 ]} - match: { errors: false } - - match: { items.0.index.result: created } + - match: { items.0.create.result: created } - match: { items.1.index.result: created } - do: @@ -65,7 +65,7 @@ _index: test_index _id: id_3 - { "my_location": "41.12,-71.34" } # matches the field name defined in the `my_location` template - - index: + - create: _index: test_index _id: id_4 dynamic_templates: @@ -169,3 +169,64 @@ - match: { items.0.index.error.reason: "failed to parse field [foo] of type [keyword] in document with id 'id_11'. 
Preview of field's value: '{bar=hello world}'"} - match: { items.1.index.status: 201 } - match: { items.1.index.result: created } + +--- +"Dynamic templates with op_type": + - skip: + version: " - 8.6.0" + reason: "bug fixed in 8.6.1" + + - do: + indices.create: + index: test_index + body: + mappings: + dynamic_templates: + - location: + mapping: + type: geo_point + - my_location: + match: my* + mapping: + type: geo_point + - string: + mapping: + type: keyword + - do: + bulk: + refresh: true + body: + - index: + _index: test_index + _id: id_1 + op_type: create + dynamic_templates: + location: location + - { "location": "41.12,-71.34"} + - index: + _index: test_index + _id: id_2 + op_type: index + dynamic_templates: + location: location + - { "location": [ -71.34, 41.12 ]} + - match: { errors: false } + - match: { items.0.create.result: created } + - match: { items.1.index.result: created } + + - do: + search: + index: test_index + body: + query: + geo_bounding_box: + location: + top_left: + lat: 42 + lon: -72 + bottom_right: + lat: 40 + lon: -74 + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: id_1 } + - match: { hits.hits.1._id: id_2 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.nodes/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.nodes/10_basic.yml index eff06113c782..a81307bec2c7 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.nodes/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.nodes/10_basic.yml @@ -7,7 +7,7 @@ - match: $body: | / #ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name - ^ ((\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d* \s+ (-)?\d* \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)?\s+ ((-)?\d*(\.\d+)?)? \s+ (-|[cdfhilmrstvw]{1,11}) \s+ [-*x] \s+ .* \n)+ $/ + ^ ((\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d* \s+ (-)?\d* \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)?\s+ ((-)?\d*(\.\d+)?)? \s+ (-|[cdfhilmrstvwIS]{1,11}) \s+ [-*x] \s+ .* \n)+ $/ - do: cat.nodes: @@ -16,7 +16,7 @@ - match: $body: | /^ ip \s+ heap\.percent \s+ ram\.percent \s+ cpu \s+ load_1m \s+ load_5m \s+ load_15m \s+ node\.role \s+ master \s+ name \n - ((\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d* \s+ (-)?\d* \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)? \s+ (-|[cdfhilmrstvw]{1,11}) \s+ [-*x] \s+ .* \n)+ $/ + ((\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d* \s+ (-)?\d* \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)? 
\s+ (-|[cdfhilmrstvwIS]{1,11}) \s+ [-*x] \s+ .* \n)+ $/ - do: cat.nodes: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml index 177b8adab19b..5969e946494c 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_balance/10_basic.yml @@ -19,6 +19,49 @@ setup: - gte: { stats.reconciliation_time_in_millis: 0 } - match: { routing_table: {} } +--- +"Test cluster_balance_stats": + + - skip: + version: " - 8.6.99" + reason: "Field added in 8.7.0" + + - do: + _internal.get_desired_balance: { } + + - is_true: 'cluster_balance_stats' + - is_true: 'cluster_balance_stats.tiers' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count.total' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count.min' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count.max' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count.average' + - is_true: 'cluster_balance_stats.tiers.data_content.shard_count.std_dev' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load.total' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load.min' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load.max' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load.average' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_write_load.std_dev' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage.total' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage.min' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage.max' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage.average' + - is_true: 'cluster_balance_stats.tiers.data_content.forecast_disk_usage.std_dev' + - is_true: 'cluster_balance_stats.tiers.data_content.actual_disk_usage' + - is_true: 'cluster_balance_stats.tiers.data_content.actual_disk_usage.total' + - is_true: 'cluster_balance_stats.tiers.data_content.actual_disk_usage.min' + - is_true: 'cluster_balance_stats.tiers.data_content.actual_disk_usage.max' + - is_true: 'cluster_balance_stats.tiers.data_content.actual_disk_usage.average' + - is_true: 'cluster_balance_stats.tiers.data_content.actual_disk_usage.std_dev' + - is_true: 'cluster_balance_stats.nodes' + - is_true: 'cluster_balance_stats.nodes.test-cluster-0' + - gte: { 'cluster_balance_stats.nodes.test-cluster-0.shard_count' : 0 } + - gte: { 'cluster_balance_stats.nodes.test-cluster-0.forecast_write_load': 0.0 } + - gte: { 'cluster_balance_stats.nodes.test-cluster-0.forecast_disk_usage_bytes' : 0 } + - gte: { 'cluster_balance_stats.nodes.test-cluster-0.actual_disk_usage_bytes' : 0 } + --- "Test get desired balance for single shard": - do: @@ -51,8 +94,9 @@ setup: - is_true: 'routing_table.test.0.current.0.node_is_desired' - is_false: 'routing_table.test.0.current.0.relocating_node' - is_false: 'routing_table.test.0.current.0.relocating_node_is_desired' + - is_false: 'routing_table.test.0.current.0.forecast_write_load' + - is_false: 
'routing_table.test.0.current.0.forecast_shard_size_in_bytes' - match: { routing_table.test.0.desired.total: 1 } - gte: { routing_table.test.0.desired.unassigned: 0 } - gte: { routing_table.test.0.desired.ignored: 0 } - is_true: 'routing_table.test.0.desired.node_ids' - diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/10_basic.yml index 20760fef4b1d..a051b3626b21 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/10_basic.yml @@ -211,7 +211,7 @@ - skip: version: " - 7.1.99" reason: "closed indices are replicated starting version 7.2.0" - features: ["allowed_warnings"] + features: ["allowed_warnings", "default_shards"] - do: indices.create: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.prevalidate_node_removal/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.prevalidate_node_removal/10_basic.yml index 6e90d00ca595..740836efcdc4 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.prevalidate_node_removal/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.prevalidate_node_removal/10_basic.yml @@ -2,8 +2,8 @@ "Prevalidation basic test": - skip: features: contains - version: "- 8.5.99" - reason: "API added in 8.6.0" + version: "- 8.6.99" + reason: "The reason field was introduced in 8.7.0" # Fetch a node ID and stash it in node_id - do: @@ -16,7 +16,7 @@ ids: $node_id - match: { is_safe: true} - - contains: {nodes: {id: "$node_id", result: {is_safe: true, message: ""}}} + - contains: {nodes: {id: "$node_id", result: {is_safe: true, reason: no_problems, message: ""}}} --- "Prevalidation with no node specified": - skip: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml index 566cc777faa4..cd83a7868528 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/get/100_synthetic_source.yml @@ -600,6 +600,7 @@ _doc_count: _source: mode: synthetic + # with _doc_count - do: index: index: test @@ -608,7 +609,6 @@ _doc_count: body: _doc_count: 3 foo: bar - - do: get: index: test @@ -623,6 +623,98 @@ _doc_count: foo: bar - is_false: fields + # without _doc_count + - do: + index: + index: test + id: 2 + refresh: true + body: + foo: baz + - do: + get: + index: test + id: 2 + - match: {_index: "test"} + - match: {_id: "2"} + - match: {_version: 1} + - match: {found: true} + - match: + _source: + foo: baz + - is_false: fields + + # without immediately refreshing with _doc_count + - do: + index: + index: test + id: 3 + body: + _doc_count: 3 + foo: qux + - do: + get: + index: test + id: 3 + - match: {_index: "test"} + - match: {_id: "3"} + - match: {_version: 1} + - match: {found: true} + - match: + _source: + _doc_count: 3 + foo: qux + - is_false: fields + + # without immediately refreshing without _doc_count + - do: + index: + index: test + id: 4 + body: + foo: quux + - do: + get: + index: test + id: 4 + - match: {_index: "test"} + - match: {_id: "4"} + - match: {_version: 1} + - match: {found: true} + - match: + _source: + foo: quux + - is_false: fields + + # refresh all at 
once + - do: + indices.refresh: {} + - do: + get: + index: test + id: 3 + - match: {_index: "test"} + - match: {_id: "3"} + - match: {_version: 1} + - match: {found: true} + - match: + _source: + _doc_count: 3 + foo: qux + - is_false: fields + - do: + get: + index: test + id: 4 + - match: {_index: "test"} + - match: {_id: "4"} + - match: {_version: 1} + - match: {found: true} + - match: + _source: + foo: quux + - is_false: fields + --- ip with ignore_malformed: - skip: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/health/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/health/10_basic.yml index 4f2c7113d877..5e6ca8247997 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/health/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/health/10_basic.yml @@ -7,7 +7,7 @@ # reason: "health was only added in 8.2.0, and master_is_stable in 8.4.0" - do: - _internal.health: {} + health_report: { } - is_true: cluster_name - match: { status: "green" } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/health/30_feature.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/health/30_feature.yml index 575e4c7ad41d..449954220a1e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/health/30_feature.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/health/30_feature.yml @@ -1,11 +1,11 @@ --- "cluster health test drilling down into a feature": - skip: - version: "- 8.5.99" - reason: "the verbose parameter was only added in 8.6" + version: "- 8.6.99" + reason: "the API path changed in 8.7" - do: - _internal.health: + health_report: feature: master_is_stable - is_true: cluster_name @@ -15,7 +15,7 @@ - is_true: indicators.master_is_stable.details.recent_masters - do: - _internal.health: + health_report: feature: master_is_stable verbose: false diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/health/40_diagnosis.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/health/40_diagnosis.yml index f475d3b0f1d0..76b81354b741 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/health/40_diagnosis.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/health/40_diagnosis.yml @@ -1,12 +1,14 @@ --- "Diagnosis": - skip: - version: "- 8.5.99" - reason: "diagnosis was redefined in 8.6.0" + version: "- 8.6.99" + reason: "the API path changed in 8.7" - do: indices.create: index: red_index + master_timeout: 1s + timeout: 1s body: settings: number_of_shards: 1 @@ -14,7 +16,7 @@ index.routing.allocation.enable: none - do: - _internal.health: + health_report: feature: shards_availability - is_true: cluster_name diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.blocks/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.blocks/10_basic.yml index f87553f222eb..b7e84d2fd526 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.blocks/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.blocks/10_basic.yml @@ -6,9 +6,6 @@ - do: indices.create: index: test_index - body: - settings: - number_of_replicas: 0 - do: indices.add_block: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.open/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.open/10_basic.yml index 3e55ef207a0d..93447612406b 100644 --- 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.open/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.open/10_basic.yml @@ -6,13 +6,11 @@ - do: indices.create: index: test_index - body: - settings: - number_of_replicas: 0 - do: cluster.health: - wait_for_status: green + index: [test_index] + wait_for_no_initializing_shards: true - do: indices.close: @@ -34,7 +32,8 @@ - do: cluster.health: - wait_for_status: green + index: [test_index] + wait_for_no_initializing_shards: true - do: search: @@ -88,6 +87,7 @@ - is_true: acknowledged - match: { acknowledged: true } - match: { shards_acknowledged: true } + --- "Close index response with result per index": - skip: @@ -96,23 +96,19 @@ - do: indices.create: index: index_1 - body: - settings: - number_of_replicas: 0 - do: indices.create: index: index_2 - body: - settings: - number_of_replicas: 0 - do: indices.create: index: index_3 - body: - settings: - number_of_replicas: 0 + + - do: + cluster.health: + index: [index_1, index_2, index_3] + wait_for_no_initializing_shards: true - do: indices.close: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.open/20_multiple_indices.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.open/20_multiple_indices.yml index f94a95c4fc98..ec71423bdc24 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.open/20_multiple_indices.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.open/20_multiple_indices.yml @@ -2,24 +2,19 @@ setup: - do: indices.create: index: test_index1 - body: - settings: - number_of_replicas: 0 + - do: indices.create: index: test_index2 - body: - settings: - number_of_replicas: 0 + - do: indices.create: index: test_index3 - body: - settings: - number_of_replicas: 0 + - do: cluster.health: - wait_for_status: green + index: [test_index1, test_index2, test_index3] + wait_for_no_initializing_shards: true --- "All indices": @@ -46,7 +41,8 @@ setup: - do: cluster.health: - wait_for_status: green + index: [test_index1, test_index2, test_index3] + wait_for_no_initializing_shards: true - do: search: @@ -78,7 +74,8 @@ setup: - do: cluster.health: - wait_for_status: green + index: [test_index1, test_index2, test_index3] + wait_for_no_initializing_shards: true - do: search: @@ -110,7 +107,8 @@ setup: - do: cluster.health: - wait_for_status: green + index: [test_index1, test_index2, test_index3] + wait_for_no_initializing_shards: true - do: search: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml index 5eef78a8c63b..2aaf492f0ff0 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_index_template/15_composition.yml @@ -286,3 +286,66 @@ - is_false: purple-index.mappings.properties.nested.include_in_root - is_true: purple-index.mappings.properties.nested.include_in_parent + +--- +"Index template ignore_missing_component_template valid": + - skip: + version: " - 8.6.99" + reason: "index template v2 ignore_missing_component_template config not available before 8.7" + features: allowed_warnings + + - do: + cluster.put_component_template: + name: red + body: + template: + mappings: + properties: + foo: + type: keyword + + - do: + 
allowed_warnings: + - "index template [blue] has index patterns [purple-index] matching patterns from existing older templates [global] with patterns (global => [*]); this template [blue] will take precedence during new index creation" + indices.put_index_template: + name: blue + body: + index_patterns: ["purple-index"] + composed_of: ["red", "blue"] + ignore_missing_component_templates: ["blue"] + + - do: + indices.create: + index: purple-index + + - do: + indices.get: + index: purple-index + + - match: {purple-index.mappings.properties.foo: {type: keyword}} + +--- +"Index template ignore_missing_component_template invalid": + - skip: + version: " - 8.6.99" + reason: "index template v2 ignore_missing_component_template config not available before 8.7" + features: allowed_warnings + + - do: + cluster.put_component_template: + name: red + body: + template: + mappings: + properties: + foo: + type: keyword + + - do: + catch: /index_template \[blue\] invalid, cause \[index template \[blue\] specifies a missing component templates \[blue\] that does not exist/ + indices.put_index_template: + name: blue + body: + index_patterns: ["purple-index"] + composed_of: ["red", "blue"] + ignore_missing_component_templates: ["foo"] diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/10_basic.yml index b9089689b0cf..8c39340a0af3 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.sort/10_basic.yml @@ -7,7 +7,6 @@ body: settings: number_of_shards: 1 - number_of_replicas: 0 index.sort.field: rank mappings: properties: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/10_basic.yml index 47679b7f1058..7e05526eac9a 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/10_basic.yml @@ -4,9 +4,6 @@ indices.create: index: test_1 body: - settings: - index: - number_of_replicas: 0 mappings: properties: foo: @@ -23,10 +20,6 @@ - do: indices.refresh: {} - - do: - cluster.health: - wait_for_status: green - - do: search: rest_total_hits_as_int: true diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/20_docs.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/20_docs.yml index 188e817ce759..c7477c5b538a 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/20_docs.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/20_docs.yml @@ -3,9 +3,7 @@ - do: indices.create: index: test_1 - body: - settings: - number_of_replicas: 0 + - do: index: index: test_1 @@ -27,10 +25,6 @@ - do: indices.refresh: {} - - do: - cluster.health: - wait_for_status: green - - do: search: rest_total_hits_as_int: true diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/30_unlike.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/30_unlike.yml index c913268d807d..33fe1afce75b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/30_unlike.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/mlt/30_unlike.yml @@ -3,9 +3,7 @@ - do: indices.create: index: test_1 - body: - settings: - number_of_replicas: 0 + - do: index: index: test_1 @@ 
-27,10 +25,6 @@ - do: indices.refresh: {} - - do: - cluster.health: - wait_for_status: green - - do: search: rest_total_hits_as_int: true diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/msearch/20_typed_keys.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/msearch/20_typed_keys.yml index 89bc8bf53b8c..6e67dbd61f08 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/msearch/20_typed_keys.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/msearch/20_typed_keys.yml @@ -4,8 +4,6 @@ setup: indices.create: index: test-0 body: - settings: - number_of_replicas: 0 mappings: properties: index_start_at: @@ -23,8 +21,6 @@ setup: indices.create: index: test-1 body: - settings: - number_of_replicas: 0 mappings: properties: index_start_at: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/10_basic.yml index 6dc62b24a39d..f3cb3f7810e3 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/range/10_basic.yml @@ -3,8 +3,6 @@ setup: indices.create: index: test body: - settings: - number_of_replicas: 0 mappings: "properties": "integer_range": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/scroll/12_slices.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/scroll/12_slices.yml index 6cc590a36d6c..2eadd3ec294a 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/scroll/12_slices.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/scroll/12_slices.yml @@ -51,6 +51,7 @@ setup: match_all: {} - set: {_scroll_id: scroll_id} + - match: { _shards.successful: 5 } - match: {hits.total: 3 } - length: {hits.hits: 3 } - match: {hits.hits.0._id: "2" } @@ -160,6 +161,7 @@ setup: match_all: {} - set: {_scroll_id: scroll_id} + - match: { _shards.successful: 5 } - match: { hits.total.value: 3 } - length: { hits.hits: 3 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/20_fvh.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/20_fvh.yml index adb361098c94..a832ca222288 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/20_fvh.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/20_fvh.yml @@ -22,7 +22,6 @@ setup: - do: index: index: test - refresh: true body: id : 1 title : "The quick brown fox is brown" @@ -31,7 +30,6 @@ setup: - do: index: index: test - refresh: true body: id : 2 title : "The quick blue fox is blue" @@ -39,6 +37,10 @@ setup: - title: "purple octopus" - title: "purple fish" + - do: + indices.refresh: + index: [test] + --- "Highlight query": - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/50_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/50_synthetic_source.yml index a9593ae5dae1..8f2333605b31 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/50_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.highlight/50_synthetic_source.yml @@ -30,7 +30,6 @@ setup: index: index: test id: 1 - refresh: true body: foo: the quick brown fox jumped over the lazy dog @@ -38,7 +37,6 @@ setup: index: index: test id: 2 - refresh: true body: 
foo: - "To be, or not to be, that is the question:" @@ -75,6 +73,10 @@ setup: - "With this regard their currents turn awry" - "And lose the name of action." + - do: + indices.refresh: + index: [test] + --- keyword single plain: - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml index 0ffec61788a7..26b8ab46bd4e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml @@ -6,8 +6,6 @@ setup: indices.create: index: test body: - settings: - number_of_replicas: 0 mappings: properties: name: @@ -17,6 +15,11 @@ setup: dims: 5 index: true similarity: l2_norm + another_vector: + type: dense_vector + dims: 5 + index: true + similarity: l2_norm - do: index: @@ -25,6 +28,7 @@ setup: body: name: cow.jpg vector: [230.0, 300.33, -34.8988, 15.555, -200.0] + another_vector: [130.0, 115.0, -1.02, 15.555, -100.0] - do: index: @@ -33,6 +37,7 @@ setup: body: name: moose.jpg vector: [-0.5, 100.0, -13, 14.8, -156.0] + another_vector: [-0.5, 50.0, -1, 1, 120] - do: index: @@ -41,6 +46,7 @@ setup: body: name: rabbit.jpg vector: [0.5, 111.3, -13.0, 14.8, -156.0] + another_vector: [-0.5, 11.0, 0, 12, 111.0] - do: indices.refresh: {} @@ -66,7 +72,25 @@ setup: - match: {hits.hits.1._id: "3"} - match: {hits.hits.1.fields.name.0: "rabbit.jpg"} +--- +"kNN multi-field search only": + - skip: + version: ' - 8.6.99' + reason: 'multi-field kNN search added to search endpoint in 8.7' + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + - {field: vector, query_vector: [-0.5, 90.0, -10, 14.8, -156.0], k: 2, num_candidates: 3} + - {field: another_vector, query_vector: [-0.5, 11.0, 0, 12, 111.0], k: 2, num_candidates: 3} + + - match: {hits.hits.0._id: "3"} + - match: {hits.hits.0.fields.name.0: "rabbit.jpg"} + - match: {hits.hits.1._id: "2"} + - match: {hits.hits.1.fields.name.0: "moose.jpg"} --- "kNN search plus query": - skip: @@ -94,7 +118,31 @@ setup: - match: {hits.hits.2._id: "3"} - match: {hits.hits.2.fields.name.0: "rabbit.jpg"} +--- +"kNN multi-field search with query": + - skip: + version: ' - 8.6.99' + reason: 'multi-field kNN search added to search endpoint in 8.7' + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + - {field: vector, query_vector: [-0.5, 90.0, -10, 14.8, -156.0], k: 2, num_candidates: 3} + - {field: another_vector, query_vector: [-0.5, 11.0, 0, 12, 111.0], k: 2, num_candidates: 3} + query: + term: + name: cow.jpg + + - match: {hits.hits.0._id: "3"} + - match: {hits.hits.0.fields.name.0: "rabbit.jpg"} + - match: {hits.hits.1._id: "1"} + - match: {hits.hits.1.fields.name.0: "cow.jpg"} + + - match: {hits.hits.2._id: "2"} + - match: {hits.hits.2.fields.name.0: "moose.jpg"} --- "kNN search with filter": - skip: @@ -110,7 +158,6 @@ setup: query_vector: [-0.5, 90.0, -10, 14.8, -156.0] k: 2 num_candidates: 3 - filter: term: name: "rabbit.jpg" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml index a6177a0f586b..a9120547c886 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml +++ 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/45_knn_search_byte.yml @@ -7,8 +7,6 @@ setup: indices.create: index: test body: - settings: - number_of_replicas: 0 mappings: properties: name: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_knn_search_filter_alias.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_knn_search_filter_alias.yml index b802d19bfe08..92065857bd57 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_knn_search_filter_alias.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_knn_search_filter_alias.yml @@ -9,7 +9,6 @@ setup: body: settings: number_of_shards: 1 - number_of_replicas: 0 mappings: dynamic: false properties: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/70_dense_vector_telemetry.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/70_dense_vector_telemetry.yml index 165ec327c73f..136dc807a89d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/70_dense_vector_telemetry.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/70_dense_vector_telemetry.yml @@ -8,7 +8,6 @@ setup: body: settings: index.number_of_shards: 2 - index.number_of_replicas: 0 mappings: properties: vector1: @@ -32,7 +31,6 @@ setup: body: settings: index.number_of_shards: 2 - index.number_of_replicas: 0 mappings: properties: vector1: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/240_date_nanos.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/240_date_nanos.yml index 95f1ee6cd9f3..f35bf691fbdc 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/240_date_nanos.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/240_date_nanos.yml @@ -7,7 +7,6 @@ setup: body: settings: number_of_shards: 3 - number_of_replicas: 0 mappings: properties: date: @@ -21,7 +20,6 @@ setup: body: settings: number_of_shards: 3 - number_of_replicas: 0 mappings: properties: date: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/250_distance_feature.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/250_distance_feature.yml index bafb7d52c718..e00d7bbb7236 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/250_distance_feature.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/250_distance_feature.yml @@ -7,8 +7,6 @@ setup: indices.create: index: index1 body: - settings: - number_of_replicas: 0 mappings: properties: my_date: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml index 415da2ad8767..c0b754c02275 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml @@ -168,8 +168,8 @@ disabling stored fields removes fetch sub phases: --- dfs knn vector profiling: - skip: - version: ' - 8.5.99' - reason: dfs profiling implemented in 8.6.0 + version: ' - 8.6.99' + reason: multi-knn dfs profiling implemented in 8.7.0 - do: indices.create: @@ -205,31 +205,31 @@ dfs knn vector profiling: num_candidates: 100 - match: { hits.total.value: 1 } - - 
match: { profile.shards.0.dfs.knn.query.0.type: "DocAndScoreQuery" } - - match: { profile.shards.0.dfs.knn.query.0.description: "DocAndScore[100]" } - - gt: { profile.shards.0.dfs.knn.query.0.time_in_nanos: 0 } - - match: { profile.shards.0.dfs.knn.query.0.breakdown.set_min_competitive_score_count: 0 } - - match: { profile.shards.0.dfs.knn.query.0.breakdown.set_min_competitive_score: 0 } - - match: { profile.shards.0.dfs.knn.query.0.breakdown.match_count: 0 } - - match: { profile.shards.0.dfs.knn.query.0.breakdown.match: 0 } - - match: { profile.shards.0.dfs.knn.query.0.breakdown.shallow_advance_count: 0 } - - match: { profile.shards.0.dfs.knn.query.0.breakdown.shallow_advance: 0 } - - gt: { profile.shards.0.dfs.knn.query.0.breakdown.next_doc_count: 0 } - - gt: { profile.shards.0.dfs.knn.query.0.breakdown.next_doc: 0 } - - gt: { profile.shards.0.dfs.knn.query.0.breakdown.score_count: 0 } - - gt: { profile.shards.0.dfs.knn.query.0.breakdown.score: 0 } - - match: { profile.shards.0.dfs.knn.query.0.breakdown.compute_max_score_count: 0 } - - match: { profile.shards.0.dfs.knn.query.0.breakdown.compute_max_score: 0 } - - gt: { profile.shards.0.dfs.knn.query.0.breakdown.advance_count: 0 } - - gt: { profile.shards.0.dfs.knn.query.0.breakdown.advance: 0 } - - gt: { profile.shards.0.dfs.knn.query.0.breakdown.build_scorer_count: 0 } - - gt: { profile.shards.0.dfs.knn.query.0.breakdown.build_scorer: 0 } - - gt: { profile.shards.0.dfs.knn.query.0.breakdown.create_weight: 0 } - - gt: { profile.shards.0.dfs.knn.query.0.breakdown.create_weight_count: 0 } - - gt: { profile.shards.0.dfs.knn.rewrite_time: 0 } - - match: { profile.shards.0.dfs.knn.collector.0.name: "SimpleTopScoreDocCollector" } - - match: { profile.shards.0.dfs.knn.collector.0.reason: "search_top_hits" } - - gt: { profile.shards.0.dfs.knn.collector.0.time_in_nanos: 0 } + - match: { profile.shards.0.dfs.knn.0.query.0.type: "DocAndScoreQuery" } + - match: { profile.shards.0.dfs.knn.0.query.0.description: "DocAndScore[100]" } + - gt: { profile.shards.0.dfs.knn.0.query.0.time_in_nanos: 0 } + - match: { profile.shards.0.dfs.knn.0.query.0.breakdown.set_min_competitive_score_count: 0 } + - match: { profile.shards.0.dfs.knn.0.query.0.breakdown.set_min_competitive_score: 0 } + - match: { profile.shards.0.dfs.knn.0.query.0.breakdown.match_count: 0 } + - match: { profile.shards.0.dfs.knn.0.query.0.breakdown.match: 0 } + - match: { profile.shards.0.dfs.knn.0.query.0.breakdown.shallow_advance_count: 0 } + - match: { profile.shards.0.dfs.knn.0.query.0.breakdown.shallow_advance: 0 } + - gt: { profile.shards.0.dfs.knn.0.query.0.breakdown.next_doc_count: 0 } + - gt: { profile.shards.0.dfs.knn.0.query.0.breakdown.next_doc: 0 } + - gt: { profile.shards.0.dfs.knn.0.query.0.breakdown.score_count: 0 } + - gt: { profile.shards.0.dfs.knn.0.query.0.breakdown.score: 0 } + - match: { profile.shards.0.dfs.knn.0.query.0.breakdown.compute_max_score_count: 0 } + - match: { profile.shards.0.dfs.knn.0.query.0.breakdown.compute_max_score: 0 } + - gt: { profile.shards.0.dfs.knn.0.query.0.breakdown.advance_count: 0 } + - gt: { profile.shards.0.dfs.knn.0.query.0.breakdown.advance: 0 } + - gt: { profile.shards.0.dfs.knn.0.query.0.breakdown.build_scorer_count: 0 } + - gt: { profile.shards.0.dfs.knn.0.query.0.breakdown.build_scorer: 0 } + - gt: { profile.shards.0.dfs.knn.0.query.0.breakdown.create_weight: 0 } + - gt: { profile.shards.0.dfs.knn.0.query.0.breakdown.create_weight_count: 0 } + - gt: { profile.shards.0.dfs.knn.0.rewrite_time: 0 } + - match: { 
profile.shards.0.dfs.knn.0.collector.0.name: "SimpleTopScoreDocCollector" } + - match: { profile.shards.0.dfs.knn.0.collector.0.reason: "search_top_hits" } + - gt: { profile.shards.0.dfs.knn.0.collector.0.time_in_nanos: 0 } --- dfs profile for search with dfs_query_then_fetch: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/380_sort_segments_on_timestamp.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/380_sort_segments_on_timestamp.yml index 8a1f6b03ef8b..fb45d63aad7f 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/380_sort_segments_on_timestamp.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/380_sort_segments_on_timestamp.yml @@ -15,7 +15,6 @@ type: date settings: number_of_shards: 1 - number_of_replicas: 0 # 1st segment - do: @@ -54,7 +53,6 @@ body: settings: number_of_shards: 1 - number_of_replicas: 0 # 1st segment - do: @@ -114,7 +112,6 @@ type: date settings: number_of_shards: 1 - number_of_replicas: 0 # 1st segment missing @timestamp field - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml index 902f7086e94d..e013992e06c9 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/400_synthetic_source.yml @@ -436,3 +436,61 @@ _source filtering: - match: hits.hits.0._source: kwd: foo + +--- +_doc_count: + - skip: + version: " - 8.6.99" + reason: bug caused by many not having _doc_count fixed in 8.7.0 + + - do: + indices.create: + index: test + body: + mappings: + _source: + mode: synthetic + + - do: + index: + index: test + id: 2 + body: + foo: baz + - do: + index: + index: test + id: 3 + body: + foo: baz + - do: + index: + index: test + id: 4 + body: + foo: baz + - do: + index: + index: test + id: 1 + body: + _doc_count: 3 + foo: bar + - do: + indices.refresh: {} + + - do: + search: + index: test + body: + sort: foo.keyword + - is_false: hits.hits.0.fields + - is_false: hits.hits.1.fields + - match: + hits.hits.0._source: + _doc_count: 3 + foo: bar + - match: + hits.hits.1._source: + foo: baz + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/suggest/40_typed_keys.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/suggest/40_typed_keys.yml index daac7d895611..89e26ec113f4 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/suggest/40_typed_keys.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/suggest/40_typed_keys.yml @@ -4,8 +4,6 @@ setup: indices.create: index: test body: - settings: - number_of_replicas: 0 mappings: properties: title: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/05_dimension_and_metric_in_non_tsdb_index.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/05_dimension_and_metric_in_non_tsdb_index.yml index 458b27149fd8..467002d2243e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/05_dimension_and_metric_in_non_tsdb_index.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/05_dimension_and_metric_in_non_tsdb_index.yml @@ -9,7 +9,6 @@ add time series mappings: body: settings: index: - number_of_replicas: 0 number_of_shards: 2 mappings: properties: @@ -60,7 +59,6 @@ can't shadow dimensions: body: 
settings: index: - number_of_replicas: 0 number_of_shards: 2 mappings: properties: @@ -126,7 +124,6 @@ can't shadow metrics: body: settings: index: - number_of_replicas: 0 number_of_shards: 2 mappings: properties: @@ -192,7 +189,6 @@ no _tsid in standard indices: body: settings: index: - number_of_replicas: 0 number_of_shards: 2 mappings: properties: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml index 786df469954f..dbc8076c0a1a 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml @@ -16,7 +16,6 @@ date: time_series: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z - number_of_replicas: 0 number_of_shards: 2 mappings: properties: @@ -65,7 +64,6 @@ date_nanos: time_series: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z - number_of_replicas: 0 number_of_shards: 2 mappings: properties: @@ -114,7 +112,6 @@ automatically add with date: time_series: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z - number_of_replicas: 0 number_of_shards: 2 mappings: properties: @@ -162,7 +159,6 @@ reject @timestamp with wrong type: time_series: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z - number_of_replicas: 0 number_of_shards: 2 mappings: properties: @@ -187,7 +183,6 @@ reject timestamp meta field with wrong type: time_series: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z - number_of_replicas: 0 number_of_shards: 2 mappings: _data_stream_timestamp: @@ -210,7 +205,6 @@ enable timestamp meta field: time_series: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z - number_of_replicas: 0 number_of_shards: 2 mappings: _data_stream_timestamp: @@ -240,7 +234,6 @@ reject bad timestamp meta field: time_series: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z - number_of_replicas: 0 number_of_shards: 2 mappings: _data_stream_timestamp: enabled @@ -262,7 +255,6 @@ write without timestamp: time_series: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z - number_of_replicas: 0 number_of_shards: 2 mappings: properties: @@ -302,7 +294,6 @@ explicitly enable timestamp meta field: time_series: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z - number_of_replicas: 0 number_of_shards: 2 mappings: properties: @@ -353,7 +344,6 @@ unable to create a time series index with @timestamp runtime field: body: settings: number_of_shards: 1 - number_of_replicas: 0 index: mode: time_series routing_path: [metricset, k8s.pod.uid] diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml index a5f0155f950c..a1c1c87ee687 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml @@ -413,3 +413,124 @@ nested fields: rx: type: long time_series_metric: gauge + +--- +regular source: + - skip: + version: " - 8.6.99" + reason: synthetic source + + - do: + catch: '/time series indices only support synthetic source/' + indices.create: + index: tsdb_index + body: + settings: + index: + mode: time_series + routing_path: [k8s.pod.uid] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 
2021-04-29T00:00:00Z + mappings: + _source: + mode: stored + properties: + "@timestamp": + type: date + k8s: + properties: + pod: + properties: + uid: + type: keyword + time_series_dimension: true +--- +disabled source: + - skip: + version: " - 8.6.99" + reason: synthetic source + + - do: + catch: '/time series indices only support synthetic source/' + indices.create: + index: tsdb_index + body: + settings: + index: + mode: time_series + routing_path: [k8s.pod.uid] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + mappings: + _source: + mode: disabled + properties: + "@timestamp": + type: date + k8s: + properties: + pod: + properties: + uid: + type: keyword + time_series_dimension: true + +--- +source include/exclude: + - skip: + version: " - 8.6.99" + reason: synthetic source + + - do: + catch: '/filtering the stored _source is incompatible with synthetic source/' + indices.create: + index: tsdb_index + body: + settings: + index: + mode: time_series + routing_path: [k8s.pod.uid] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + mappings: + _source: + includes: [a] + properties: + "@timestamp": + type: date + k8s: + properties: + pod: + properties: + uid: + type: keyword + time_series_dimension: true + + - do: + catch: '/filtering the stored _source is incompatible with synthetic source/' + indices.create: + index: tsdb_index + body: + settings: + index: + mode: time_series + routing_path: [k8s.pod.uid] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + mappings: + _source: + excludes: [b] + properties: + "@timestamp": + type: date + k8s: + properties: + pod: + properties: + uid: + type: keyword + time_series_dimension: true diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml index ee67c2fe33b6..a2aab4d8925f 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_id_generation.yml @@ -14,7 +14,6 @@ setup: time_series: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z - number_of_replicas: 0 number_of_shards: 2 mappings: properties: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml index 03872f79a037..f78f16780cb4 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml @@ -148,94 +148,3 @@ clone: - match: {hits.total.value: 1} - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} - ---- -clone no source index: - - skip: - version: " - 8.1.99" - reason: tsdb indexing changed in 8.2.0 - - - do: - indices.create: - index: test_no_source - body: - settings: - index: - mode: time_series - routing_path: [ metricset, k8s.pod.uid ] - time_series: - start_time: 2021-04-28T00:00:00Z - end_time: 2021-04-29T00:00:00Z - number_of_shards: 1 - number_of_replicas: 0 - mappings: - _source: - enabled: false - properties: - "@timestamp": - type: date - metricset: - type: keyword - time_series_dimension: true - k8s: - properties: - pod: - properties: - uid: - type: keyword - time_series_dimension: true - name: - type: keyword - ip: 
- type: ip - network: - properties: - tx: - type: long - rx: - type: long - - - do: - bulk: - refresh: true - index: test_no_source - body: - - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' - - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2005177954, "rx": 801479970}}}}' - - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:44.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2006223737, "rx": 802337279}}}}' - - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.2", "network": {"tx": 2012916202, "rx": 803685721}}}}' - - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434521831, "rx": 530575198}}}}' - - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434577921, "rx": 530600088}}}}' - - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434587694, "rx": 530604797}}}}' - - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' - - - do: - indices.put_settings: - index: test_no_source - body: - index.blocks.write: true - - - do: - indices.clone: - index: test_no_source - target: test_no_source_clone - - - do: - search: - index: test_no_source_clone - body: - docvalue_fields: - - field: _tsid - query: - query_string: - query: '+@timestamp:"2021-04-28T18:51:04.467Z" +k8s.pod.name:cat' - - match: {hits.total.value: 1} - - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml index 782e8226ebb3..adb71b912572 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_unsupported_operations.yml @@ -14,7 +14,6 @@ setup: time_series: start_time: 2021-04-28T00:00:00Z end_time: 2021-04-29T00:00:00Z - number_of_replicas: 0 number_of_shards: 2 mappings: properties: @@ -229,7 +228,7 @@ aggregate on _id: index: test body: size: 1 - aggs: + aggs: id: terms: field: _id diff --git a/server/build.gradle b/server/build.gradle index 9e1985d5643c..b360afa1ad62 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -77,6 +77,12 @@ dependencies { } +spotless { + java { + targetExclude "src/main/java/org/elasticsearch/Version.java" + } +} + 
tasks.named("forbiddenPatterns").configure { exclude '**/*.json' exclude '**/*.jmx' @@ -229,6 +235,14 @@ tasks.named("thirdPartyAudit").configure { 'org.zeromq.ZMQ', ) ignoreMissingClasses 'javax.xml.bind.DatatypeConverter' + + if (BuildParams.runtimeJavaVersion == JavaVersion.VERSION_20) { + ignoreMissingClasses( + // This class was removed in Java 20 but is only referenced by a class that requires preview features anyhow + // See: https://github.com/apache/lucene/pull/12042 + 'java.lang.foreign.MemorySession', + ) + } } tasks.named("dependencyLicenses").configure { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java index 9ceeba1c7a59..76200604338d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java @@ -366,7 +366,7 @@ public void testUnassignedReplicaWithPriorCopy() throws Exception { assertThat(d.getExplanation(), startsWith("a copy of this shard is already allocated to this node [")); } else if (d.label().equals("filter") && nodeHoldingPrimary == false) { assertEquals(Decision.Type.NO, d.type()); - assertEquals(formatted(""" + assertEquals(Strings.format(""" node does not match index setting [index.routing.allocation.include] \ filters [_name:"%s"]\ """, primaryNodeName), d.getExplanation()); @@ -914,7 +914,7 @@ public void testBetterBalanceButCannotAllocate() throws Exception { for (Decision d : result.getCanAllocateDecision().getDecisions()) { if (d.label().equals("filter")) { assertEquals(Decision.Type.NO, d.type()); - assertEquals(formatted(""" + assertEquals(Strings.format(""" node does not match index setting [index.routing.allocation.include] filters [_name:"%s"]\ """, primaryNodeName), d.getExplanation()); } else { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index 2ce42b653c1f..3bcd7626a5f0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -508,8 +508,8 @@ protected void doExecute(Task task, TestRequest request, ActionListener subRequests = request.subRequests; GroupedActionListener groupedListener = new GroupedActionListener<>( - listener.map(r -> new TestResponse()), - subRequests.size() + 1 + subRequests.size() + 1, + listener.map(r -> new TestResponse()) ); transportService.getThreadPool().generic().execute(ActionRunnable.supply(groupedListener, () -> { assertTrue(beforeExecuteLatches.get(request).await(60, TimeUnit.SECONDS)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index f019438efd92..36738d4f7681 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -43,6 +43,7 @@ 
import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.tasks.RemovedTaskListener; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.tasks.TaskId; @@ -264,7 +265,7 @@ public void testTransportBroadcastReplicationTasks() { // we will have as many [s][p] and [s][r] tasks as we have primary and replica shards assertEquals(numberOfShards.totalNumShards, numberOfEvents(RefreshAction.NAME + "[s][*]", Tuple::v1)); - // we the [s][p] and [s][r] tasks should have a corresponding [s] task on the same node as a parent + // the [s][p] and [s][r] tasks should have a corresponding [s] task on the same node as a parent List spEvents = findEvents(RefreshAction.NAME + "[s][*]", Tuple::v1); for (TaskInfo taskInfo : spEvents) { List sTask; @@ -272,17 +273,17 @@ public void testTransportBroadcastReplicationTasks() { // A [s][p] level task should have a corresponding [s] level task on the same node sTask = findEvents( RefreshAction.NAME + "[s]", - event -> event.v1() - && taskInfo.taskId().getNodeId().equals(event.v2().taskId().getNodeId()) - && taskInfo.description().equals(event.v2().description()) + event -> event.v1() // task registration event + && event.v2().taskId().equals(taskInfo.parentTaskId()) + && event.v2().taskId().getNodeId().equals(taskInfo.taskId().getNodeId()) ); } else { - // A [s][r] level task should have a corresponding [s] level task on the a different node (where primary is located) + // A [s][r] level task should have a corresponding [s] level task on a different node (where primary is located) sTask = findEvents( RefreshAction.NAME + "[s]", - event -> event.v1() - && taskInfo.parentTaskId().getNodeId().equals(event.v2().taskId().getNodeId()) - && taskInfo.description().equals(event.v2().description()) + event -> event.v1() // task registration event + && event.v2().taskId().equals(taskInfo.parentTaskId()) + && event.v2().taskId().getNodeId().equals(taskInfo.taskId().getNodeId()) == false ); } // There should be only one parent task @@ -636,6 +637,11 @@ public void onTaskRegistered(Task task) {} @Override public void onTaskUnregistered(Task task) {} + + @Override + public void subscribeForRemovedTasks(RemovedTaskListener removedTaskListener) { + waitForWaitingToStart.countDown(); + } }); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java index f60ab6204cc2..f4b1903bba67 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java @@ -8,15 +8,38 @@ package org.elasticsearch.action.admin.cluster.tasks; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.inject.Inject; 
+import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.Collection; +import java.util.List; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; -public class ListTasksIT extends ESIntegTestCase { +public class ListTasksIT extends ESSingleNodeTestCase { public void testListTasksFilteredByDescription() { @@ -40,4 +63,152 @@ public void testListTasksValidation() { assertThat(ex.getMessage(), containsString("matching on descriptions is not available when [detailed] is false")); } + + public void testWaitForCompletion() throws Exception { + final var threadPool = getInstanceFromNode(ThreadPool.class); + final var threadContext = threadPool.getThreadContext(); + + final var barrier = new CyclicBarrier(2); + getInstanceFromNode(PluginsService.class).filterPlugins(TestPlugin.class).get(0).barrier = barrier; + + final var testActionFuture = new PlainActionFuture(); + client().execute(TEST_ACTION, new TestRequest(), testActionFuture.map(r -> { + assertThat(threadContext.getResponseHeaders().get(TestTransportAction.HEADER_NAME), hasItem(TestTransportAction.HEADER_VALUE)); + return r; + })); + + barrier.await(10, TimeUnit.SECONDS); + + final var listTasksResponse = client().admin().cluster().prepareListTasks().setActions(TestTransportAction.NAME).get(); + assertThat(listTasksResponse.getNodeFailures(), empty()); + assertEquals(1, listTasksResponse.getTasks().size()); + final var task = listTasksResponse.getTasks().get(0); + assertEquals(TestTransportAction.NAME, task.action()); + + final var listWaitFuture = new PlainActionFuture(); + client().admin() + .cluster() + .prepareListTasks() + .setTargetTaskId(task.taskId()) + .setWaitForCompletion(true) + .execute(listWaitFuture.delegateFailure((l, listResult) -> { + assertEquals(1, listResult.getTasks().size()); + assertEquals(task.taskId(), listResult.getTasks().get(0).taskId()); + // the task must now be complete: + client().admin().cluster().prepareListTasks().setActions(TestTransportAction.NAME).execute(l.map(listAfterWaitResult -> { + assertThat(listAfterWaitResult.getTasks(), empty()); + assertThat(listAfterWaitResult.getNodeFailures(), empty()); + assertThat(listAfterWaitResult.getTaskFailures(), empty()); + return null; + })); + // and we must not see its header: + assertNull(threadContext.getResponseHeaders().get(TestTransportAction.HEADER_NAME)); + })); + + // briefly fill up the management pool so that (a) we know the wait has started and (b) we know it's not blocking + flushThreadPool(threadPool, ThreadPool.Names.MANAGEMENT); + + final var getWaitFuture = new PlainActionFuture(); + client().admin() + .cluster() + .prepareGetTask(task.taskId()) + .setWaitForCompletion(true) + .execute(getWaitFuture.delegateFailure((l, getResult) -> { + assertTrue(getResult.getTask().isCompleted()); + assertEquals(task.taskId(), getResult.getTask().getTask().taskId()); + // the task must now be complete: + 
client().admin().cluster().prepareListTasks().setActions(TestTransportAction.NAME).execute(l.map(listAfterWaitResult -> { + assertThat(listAfterWaitResult.getTasks(), empty()); + assertThat(listAfterWaitResult.getNodeFailures(), empty()); + assertThat(listAfterWaitResult.getTaskFailures(), empty()); + return null; + })); + // and we must not see its header: + assertNull(threadContext.getResponseHeaders().get(TestTransportAction.HEADER_NAME)); + })); + + assertFalse(listWaitFuture.isDone()); + assertFalse(testActionFuture.isDone()); + barrier.await(10, TimeUnit.SECONDS); + testActionFuture.get(10, TimeUnit.SECONDS); + listWaitFuture.get(10, TimeUnit.SECONDS); + getWaitFuture.get(10, TimeUnit.SECONDS); + } + + private void flushThreadPool(ThreadPool threadPool, String executor) throws InterruptedException, BrokenBarrierException, + TimeoutException { + var maxThreads = threadPool.info(executor).getMax(); + var barrier = new CyclicBarrier(maxThreads + 1); + for (int i = 0; i < maxThreads; i++) { + threadPool.executor(executor).execute(() -> { + try { + barrier.await(10, TimeUnit.SECONDS); + } catch (Exception e) { + throw new AssertionError(e); + } + }); + } + barrier.await(10, TimeUnit.SECONDS); + } + + @Override + protected Collection<Class<? extends Plugin>> getPlugins() { + return List.of(TestPlugin.class); + } + + private static final ActionType<ActionResponse.Empty> TEST_ACTION = new ActionType<>( + TestTransportAction.NAME, + in -> ActionResponse.Empty.INSTANCE + ); + + public static class TestPlugin extends Plugin implements ActionPlugin { + volatile CyclicBarrier barrier; + + @Override + public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { + return List.of(new ActionHandler<>(TEST_ACTION, TestTransportAction.class)); + } + } + + public static class TestRequest extends ActionRequest { + @Override + public ActionRequestValidationException validate() { + return null; + } + } + + public static class TestTransportAction extends HandledTransportAction<TestRequest, ActionResponse.Empty> { + + static final String NAME = "internal:test/action"; + + static final String HEADER_NAME = "HEADER_NAME"; + static final String HEADER_VALUE = "HEADER_VALUE"; + + private final TestPlugin testPlugin; + private final ThreadPool threadPool; + + @Inject + public TestTransportAction( + TransportService transportService, + ActionFilters actionFilters, + PluginsService pluginsService, + ThreadPool threadPool + ) { + super(NAME, transportService, actionFilters, in -> new TestRequest()); + testPlugin = pluginsService.filterPlugins(TestPlugin.class).get(0); + this.threadPool = threadPool; + } + + @Override + protected void doExecute(Task task, TestRequest request, ActionListener<ActionResponse.Empty> listener) { + final var barrier = testPlugin.barrier; + assertNotNull(barrier); + threadPool.generic().execute(ActionRunnable.run(listener, () -> { + barrier.await(10, TimeUnit.SECONDS); + threadPool.getThreadContext().addResponseHeader(HEADER_NAME, HEADER_VALUE); + barrier.await(10, TimeUnit.SECONDS); + })); + } + } + } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java index 8b9173ed9945..568353ab2f1b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java @@ -38,7 +38,7 @@ public void testPendingTasksWithIndexBlocks() { try { enableIndexBlock("test", blockSetting);
PendingClusterTasksResponse response = client().admin().cluster().preparePendingClusterTasks().get(); - assertNotNull(response.getPendingTasks()); + assertNotNull(response.pendingTasks()); } finally { disableIndexBlock("test", blockSetting); } @@ -54,7 +54,7 @@ public void testPendingTasksWithClusterReadOnlyBlock() { try { setClusterReadOnly(true); PendingClusterTasksResponse response = client().admin().cluster().preparePendingClusterTasks().get(); - assertNotNull(response.getPendingTasks()); + assertNotNull(response.pendingTasks()); } finally { setClusterReadOnly(false); } @@ -80,7 +80,7 @@ public boolean validateClusterForming() { } }); - assertNotNull(client().admin().cluster().preparePendingClusterTasks().get().getPendingTasks()); + assertNotNull(client().admin().cluster().preparePendingClusterTasks().get().pendingTasks()); // starting one more node allows the cluster to recover internalCluster().startNode(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index eb86ecd83e78..d3ac559a2104 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -73,14 +73,15 @@ protected boolean forbidPrivateIndexSettings() { } public void testCreateSplitIndexToN() throws IOException { + assumeFalse("https://github.com/elastic/elasticsearch/issues/33857", Constants.WINDOWS); + int[][] possibleShardSplits = new int[][] { { 2, 4, 8 }, { 3, 6, 12 }, { 1, 2, 4 } }; int[] shardSplits = randomFrom(possibleShardSplits); splitToN(shardSplits[0], shardSplits[1], shardSplits[2]); } public void testSplitFromOneToN() { - - assumeFalse("https://github.com/elastic/elasticsearch/issues/34080", Constants.WINDOWS); + assumeFalse("https://github.com/elastic/elasticsearch/issues/33857", Constants.WINDOWS); splitToN(1, 5, 10); client().admin().indices().prepareDelete("*").get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index 5a24caabbd1a..ba1e060efd91 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -837,8 +837,8 @@ public void testRolloverConcurrently() throws Exception { .prepareRolloverIndex(aliasName) .waitForActiveShards(ActiveShardCount.NONE) .get(); - assertThat(response.getOldIndex(), equalTo(aliasName + formatted("-%06d", j))); - assertThat(response.getNewIndex(), equalTo(aliasName + formatted("-%06d", j + 1))); + assertThat(response.getOldIndex(), equalTo(aliasName + Strings.format("-%06d", j))); + assertThat(response.getNewIndex(), equalTo(aliasName + Strings.format("-%06d", j + 1))); assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); } @@ -859,7 +859,7 @@ public void testRolloverConcurrently() throws Exception { for (int j = 1; j <= numOfIndices; j++) { AliasMetadata.Builder amBuilder = new AliasMetadata.Builder(aliasName); amBuilder.writeIndex(j == numOfIndices); - expected.add(Map.entry(aliasName + formatted("-%06d", j), List.of(amBuilder.build()))); + expected.add(Map.entry(aliasName + 
Strings.format("-%06d", j), List.of(amBuilder.build()))); } assertThat(actual, containsInAnyOrder(expected.toArray(Object[]::new))); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2IT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2IT.java new file mode 100644 index 000000000000..a9e66a912504 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2IT.java @@ -0,0 +1,302 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.bulk; + +import com.carrotsearch.randomizedtesting.generators.RandomPicks; + +import org.elasticsearch.action.get.MultiGetItemResponse; +import org.elasticsearch.action.get.MultiGetRequestBuilder; +import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.Requests; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Arrays; +import java.util.Comparator; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.both; +import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class BulkProcessor2IT extends ESIntegTestCase { + + public void testThatBulkProcessor2CountIsCorrect() throws Exception { + final CountDownLatch latch = new CountDownLatch(1); + BulkProcessor2TestListener listener = new BulkProcessor2TestListener(latch); + + int numDocs = randomIntBetween(10, 100); + BulkProcessor2 processor = BulkProcessor2.builder(client()::bulk, listener, client().threadPool()) + // let's make sure that the bulk action limit trips, one single execution will index all the documents + .setBulkActions(numDocs) + .setFlushInterval(TimeValue.timeValueHours(24)) + .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) + .build(); + try { + + MultiGetRequestBuilder multiGetRequestBuilder = indexDocs(client(), processor, numDocs); + + latch.await(); + + assertThat(listener.beforeCounts.get(), equalTo(1)); + assertThat(listener.afterCounts.get(), equalTo(1)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertResponseItems(listener.bulkItems, numDocs); + assertMultiGetResponse(multiGetRequestBuilder.get(), numDocs); + assertThat(processor.getTotalBytesInFlight(), equalTo(0L)); + } finally { + processor.awaitClose(5, TimeUnit.SECONDS); + } + } + 
+ public void testBulkProcessor2ConcurrentRequests() throws Exception { + int bulkActions = randomIntBetween(10, 100); + int numDocs = randomIntBetween(bulkActions, bulkActions + 100); + + int expectedBulkActions = numDocs / bulkActions; + + final CountDownLatch latch = new CountDownLatch(expectedBulkActions); + int totalExpectedBulkActions = numDocs % bulkActions == 0 ? expectedBulkActions : expectedBulkActions + 1; + final CountDownLatch closeLatch = new CountDownLatch(totalExpectedBulkActions); + + BulkProcessor2TestListener listener = new BulkProcessor2TestListener(latch, closeLatch); + + MultiGetRequestBuilder multiGetRequestBuilder; + BulkProcessor2 processor = BulkProcessor2.builder(client()::bulk, listener, client().threadPool()) + .setBulkActions(bulkActions) + // set interval and size to high values + .setFlushInterval(TimeValue.timeValueHours(24)) + .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) + .build(); + try { + + multiGetRequestBuilder = indexDocs(client(), processor, numDocs); + + latch.await(); + + assertThat(listener.beforeCounts.get(), equalTo(expectedBulkActions)); + assertThat(listener.afterCounts.get(), equalTo(expectedBulkActions)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertThat(listener.bulkItems.size(), equalTo(numDocs - numDocs % bulkActions)); + } finally { + processor.awaitClose(5, TimeUnit.SECONDS); + } + + closeLatch.await(); + + assertThat(listener.beforeCounts.get(), equalTo(totalExpectedBulkActions)); + assertThat(listener.afterCounts.get(), equalTo(totalExpectedBulkActions)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertThat(listener.bulkItems.size(), equalTo(numDocs)); + + Set ids = new HashSet<>(); + for (BulkItemResponse bulkItemResponse : listener.bulkItems) { + assertThat(bulkItemResponse.getFailureMessage(), bulkItemResponse.isFailed(), equalTo(false)); + assertThat(bulkItemResponse.getIndex(), equalTo("test")); + // with concurrent requests > 1 we can't rely on the order of the bulk requests + assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(numDocs))); + // we do want to check that we don't get duplicate ids back + assertThat(ids.add(bulkItemResponse.getId()), equalTo(true)); + } + + assertMultiGetResponse(multiGetRequestBuilder.get(), numDocs); + assertThat(processor.getTotalBytesInFlight(), equalTo(0L)); + } + + public void testBulkProcessor2WaitOnClose() throws Exception { + BulkProcessor2TestListener listener = new BulkProcessor2TestListener(); + + int numDocs = randomIntBetween(10, 100); + BulkProcessor2 processor = BulkProcessor2.builder(client()::bulk, listener, client().threadPool()) + // let's make sure that the bulk action limit trips, one single execution will index all the documents + .setBulkActions(numDocs) + .setFlushInterval(TimeValue.timeValueHours(24)) + .setBulkSize(new ByteSizeValue(randomIntBetween(1, 10), RandomPicks.randomFrom(random(), ByteSizeUnit.values()))) + .build(); + + MultiGetRequestBuilder multiGetRequestBuilder = indexDocs(client(), processor, numDocs); + processor.awaitClose(1, TimeUnit.MINUTES); + assertThat(listener.beforeCounts.get(), greaterThanOrEqualTo(1)); + assertThat(listener.afterCounts.get(), greaterThanOrEqualTo(1)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertResponseItems(listener.bulkItems, numDocs); + assertMultiGetResponse(multiGetRequestBuilder.get(), numDocs); + } + + public void testBulkProcessor2ConcurrentRequestsReadOnlyIndex() throws Exception { + createIndex("test-ro"); + 
assertAcked( + client().admin() + .indices() + .prepareUpdateSettings("test-ro") + .setSettings(Settings.builder().put(IndexMetadata.SETTING_BLOCKS_WRITE, true)) + ); + ensureGreen(); + + int bulkActions = randomIntBetween(10, 100); + int numDocs = randomIntBetween(bulkActions, bulkActions + 100); + + int expectedBulkActions = numDocs / bulkActions; + + final CountDownLatch latch = new CountDownLatch(expectedBulkActions); + int totalExpectedBulkActions = numDocs % bulkActions == 0 ? expectedBulkActions : expectedBulkActions + 1; + final CountDownLatch closeLatch = new CountDownLatch(totalExpectedBulkActions); + + int testDocs = 0; + int testReadOnlyDocs = 0; + MultiGetRequestBuilder multiGetRequestBuilder = client().prepareMultiGet(); + BulkProcessor2TestListener listener = new BulkProcessor2TestListener(latch, closeLatch); + + BulkProcessor2 processor = BulkProcessor2.builder(client()::bulk, listener, client().threadPool()) + .setBulkActions(bulkActions) + // set interval and size to high values + .setFlushInterval(TimeValue.timeValueHours(24)) + .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) + .build(); + try { + + for (int i = 1; i <= numDocs; i++) { + if (randomBoolean()) { + testDocs++; + processor.add( + new IndexRequest("test").id(Integer.toString(testDocs)).source(Requests.INDEX_CONTENT_TYPE, "field", "value") + ); + multiGetRequestBuilder.add("test", Integer.toString(testDocs)); + } else { + testReadOnlyDocs++; + processor.add( + new IndexRequest("test-ro").id(Integer.toString(testReadOnlyDocs)) + .source(Requests.INDEX_CONTENT_TYPE, "field", "value") + ); + } + } + } finally { + processor.awaitClose(5, TimeUnit.SECONDS); + } + + closeLatch.await(); + + assertThat(listener.beforeCounts.get(), equalTo(totalExpectedBulkActions)); + assertThat(listener.afterCounts.get(), equalTo(totalExpectedBulkActions)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertThat(listener.bulkItems.size(), equalTo(testDocs + testReadOnlyDocs)); + assertThat(processor.getTotalBytesInFlight(), equalTo(0L)); + Set ids = new HashSet<>(); + Set readOnlyIds = new HashSet<>(); + for (BulkItemResponse bulkItemResponse : listener.bulkItems) { + assertThat(bulkItemResponse.getIndex(), either(equalTo("test")).or(equalTo("test-ro"))); + if (bulkItemResponse.getIndex().equals("test")) { + assertThat(bulkItemResponse.isFailed(), equalTo(false)); + // with concurrent requests > 1 we can't rely on the order of the bulk requests + assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(testDocs))); + // we do want to check that we don't get duplicate ids back + assertThat(ids.add(bulkItemResponse.getId()), equalTo(true)); + } else { + assertThat(bulkItemResponse.isFailed(), equalTo(true)); + // with concurrent requests > 1 we can't rely on the order of the bulk requests + assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(testReadOnlyDocs))); + // we do want to check that we don't get duplicate ids back + assertThat(readOnlyIds.add(bulkItemResponse.getId()), equalTo(true)); + } + } + + assertMultiGetResponse(multiGetRequestBuilder.get(), testDocs); + } + + private static MultiGetRequestBuilder indexDocs(Client client, BulkProcessor2 processor, int numDocs) throws Exception { + MultiGetRequestBuilder multiGetRequestBuilder = client.prepareMultiGet(); + for (int i = 1; i <= numDocs; i++) { + processor.add( + new IndexRequest("test").id(Integer.toString(i)) + .source(Requests.INDEX_CONTENT_TYPE, "field", 
randomRealisticUnicodeOfLengthBetween(1, 30)) + ); + multiGetRequestBuilder.add("test", Integer.toString(i)); + } + return multiGetRequestBuilder; + } + + private static void assertResponseItems(List bulkItemResponses, int numDocs) { + assertThat(bulkItemResponses.size(), is(numDocs)); + int i = 1; + List sortedResponses = bulkItemResponses.stream() + .sorted(Comparator.comparing(o -> Integer.valueOf(o.getId()))) + .toList(); + for (BulkItemResponse bulkItemResponse : sortedResponses) { + assertThat(bulkItemResponse.getIndex(), equalTo("test")); + assertThat(bulkItemResponse.getId(), equalTo(Integer.toString(i++))); + assertThat( + "item " + i + " failed with cause: " + bulkItemResponse.getFailureMessage(), + bulkItemResponse.isFailed(), + equalTo(false) + ); + } + } + + private static void assertMultiGetResponse(MultiGetResponse multiGetResponse, int numDocs) { + assertThat(multiGetResponse.getResponses().length, equalTo(numDocs)); + int i = 1; + for (MultiGetItemResponse multiGetItemResponse : multiGetResponse) { + assertThat(multiGetItemResponse.getIndex(), equalTo("test")); + assertThat(multiGetItemResponse.getId(), equalTo(Integer.toString(i++))); + } + } + + private static class BulkProcessor2TestListener implements BulkProcessor2.Listener { + + private final CountDownLatch[] latches; + private final AtomicInteger beforeCounts = new AtomicInteger(); + private final AtomicInteger afterCounts = new AtomicInteger(); + private final List bulkItems = new CopyOnWriteArrayList<>(); + private final List bulkFailures = new CopyOnWriteArrayList<>(); + + private BulkProcessor2TestListener(CountDownLatch... latches) { + this.latches = latches; + } + + @Override + public void beforeBulk(long executionId, BulkRequest request) { + beforeCounts.incrementAndGet(); + } + + @Override + public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { + bulkItems.addAll(Arrays.asList(response.getItems())); + afterCounts.incrementAndGet(); + for (CountDownLatch latch : latches) { + latch.countDown(); + } + } + + @Override + public void afterBulk(long executionId, BulkRequest request, Exception failure) { + bulkFailures.add(failure); + afterCounts.incrementAndGet(); + for (CountDownLatch latch : latches) { + latch.countDown(); + } + } + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2RetryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2RetryIT.java new file mode 100644 index 000000000000..bdf5ca3c4f52 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkProcessor2RetryIT.java @@ -0,0 +1,169 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.action.bulk; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Collections; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 2) +public class BulkProcessor2RetryIT extends ESIntegTestCase { + private static final String INDEX_NAME = "test"; + Map<String, Integer> requestToExecutionCountMap = new ConcurrentHashMap<>(); + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + // Have very low pool and queue sizes to overwhelm internal pools easily + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal, otherSettings)) + // don't mess with this one! It's quite sensitive to a low queue size + // (see also ThreadedActionListener which is happily spawning threads even when we already got rejected) + // .put("thread_pool.listener.queue_size", 1) + .put("thread_pool.get.queue_size", 1) + // default is 200 + .put("thread_pool.write.queue_size", 30) + .build(); + } + + public void testBulkRejectionLoadWithoutBackoff() throws Throwable { + boolean rejectedExecutionExpected = true; + executeBulkRejectionLoad(0, rejectedExecutionExpected); + } + + // @TestLogging( + // value = "org.elasticsearch.action.bulk.Retry2:trace", + // reason = "Logging information about locks useful for tracking down deadlock" + // ) + public void testBulkRejectionLoadWithBackoff() throws Throwable { + boolean rejectedExecutionExpected = false; + executeBulkRejectionLoad(8, rejectedExecutionExpected); + } + + @SuppressWarnings("unchecked") + private void executeBulkRejectionLoad(int maxRetries, boolean rejectedExecutionExpected) throws Throwable { + int numberOfAsyncOps = randomIntBetween(600, 700); + final CountDownLatch latch = new CountDownLatch(numberOfAsyncOps); + final Set<BulkResponse> successfulResponses = Collections.newSetFromMap(new ConcurrentHashMap<>()); + final Set<Tuple<BulkRequest, Throwable>> failedResponses = Collections.newSetFromMap(new ConcurrentHashMap<>()); + + assertAcked(prepareCreate(INDEX_NAME)); + ensureGreen(); + + BulkProcessor2 bulkProcessor = BulkProcessor2.builder(this::countAndBulk, new BulkProcessor2.Listener() { + @Override + public void beforeBulk(long executionId, BulkRequest request) { + // no op + } + + @Override + public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { + successfulResponses.add(response); + latch.countDown(); + } + + @Override + public void afterBulk(long executionId, BulkRequest request, Exception failure) { + failedResponses.add(Tuple.tuple(request, failure)); + latch.countDown(); + } + }, client().threadPool()).setBulkActions(1).setMaxNumberOfRetries(maxRetries).build(); + indexDocs(bulkProcessor, 
numberOfAsyncOps); + latch.await(10, TimeUnit.SECONDS); + bulkProcessor.awaitClose(1, TimeUnit.SECONDS); + assertThat(successfulResponses.size() + failedResponses.size(), equalTo(numberOfAsyncOps)); + // validate all responses + boolean rejectedAfterAllRetries = false; + for (BulkResponse bulkResponse : successfulResponses) { + for (BulkItemResponse bulkItemResponse : bulkResponse.getItems()) { + if (bulkItemResponse.isFailed()) { + BulkItemResponse.Failure failure = bulkItemResponse.getFailure(); + if (failure.getStatus() == RestStatus.TOO_MANY_REQUESTS) { + if (rejectedExecutionExpected == false) { + int count = requestToExecutionCountMap.get(bulkItemResponse.getId()); + if (count < maxRetries + 1) { + throw new AssertionError("Got rejected although backoff policy would allow more retries"); + } + rejectedAfterAllRetries = true; + } + } else { + throw new AssertionError("Unexpected failure status: " + failure.getStatus()); + } + } + } + } + for (Tuple failureTuple : failedResponses) { + if (ExceptionsHelper.status(failureTuple.v2()) == RestStatus.TOO_MANY_REQUESTS) { + if (rejectedExecutionExpected == false) { + for (DocWriteRequest request : failureTuple.v1().requests) { + int count = requestToExecutionCountMap.get(request.id()); + if (count < maxRetries + 1) { + throw new AssertionError("Got rejected although backoff policy would allow more retries"); + } + } + rejectedAfterAllRetries = true; + } + // ignored, we exceeded the write queue size when dispatching the initial bulk request + } else { + Throwable t = failureTuple.v2(); + // we're not expecting any other errors + throw new AssertionError("Unexpected failure", t); + } + } + + client().admin().indices().refresh(new RefreshRequest()).get(); + + SearchResponse results = client().prepareSearch(INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).setSize(0).get(); + assertThat(bulkProcessor.getTotalBytesInFlight(), equalTo(0L)); + if (rejectedExecutionExpected) { + assertThat((int) results.getHits().getTotalHits().value, lessThanOrEqualTo(numberOfAsyncOps)); + } else if (rejectedAfterAllRetries) { + assertThat((int) results.getHits().getTotalHits().value, lessThan(numberOfAsyncOps)); + } else { + assertThat((int) results.getHits().getTotalHits().value, equalTo(numberOfAsyncOps)); + } + } + + private static void indexDocs(BulkProcessor2 processor, int numDocs) { + for (int i = 1; i <= numDocs; i++) { + processor.add( + client().prepareIndex() + .setIndex(INDEX_NAME) + .setId(Integer.toString(i)) + .setSource("field", randomRealisticUnicodeOfLengthBetween(1, 30)) + .request() + ); + } + } + + void countAndBulk(BulkRequest request, ActionListener listener) { + for (DocWriteRequest docWriteRequest : request.requests) { + requestToExecutionCountMap.compute(docWriteRequest.id(), (key, value) -> value == null ? 1 : value + 1); + } + client().bulk(request, listener); + } + +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/ingest/AsyncIngestProcessorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/ingest/AsyncIngestProcessorIT.java deleted file mode 100644 index 9bf600483a71..000000000000 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/ingest/AsyncIngestProcessorIT.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.elasticsearch.action.ingest; - -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.ingest.AbstractProcessor; -import org.elasticsearch.ingest.IngestDocument; -import org.elasticsearch.ingest.Processor; -import org.elasticsearch.plugins.IngestPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.repositories.RepositoriesService; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; -import org.elasticsearch.watcher.ResourceWatcherService; -import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.XContentType; - -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.function.BiConsumer; -import java.util.function.Supplier; - -import static org.hamcrest.Matchers.equalTo; - -/** - * The purpose of this test is to verify that when a processor executes an operation asynchronously that - * the expected result is the same as if the same operation happens synchronously. - * - * In this test two test processor are defined that basically do the same operation, but a single processor - * executes asynchronously. The result of the operation should be the same and also the order in which the - * bulk responses are returned should be the same as how the corresponding index requests were defined. - */ -public class AsyncIngestProcessorIT extends ESSingleNodeTestCase { - - @Override - protected Collection> getPlugins() { - return List.of(TestPlugin.class); - } - - public void testAsyncProcessorImplementation() { - // A pipeline with 2 processors: the test async processor and sync test processor. 
- BytesReference pipelineBody = new BytesArray("{\"processors\": [{\"test-async\": {}, \"test\": {}}]}"); - client().admin().cluster().putPipeline(new PutPipelineRequest("_id", pipelineBody, XContentType.JSON)).actionGet(); - - BulkRequest bulkRequest = new BulkRequest(); - int numDocs = randomIntBetween(8, 256); - for (int i = 0; i < numDocs; i++) { - bulkRequest.add(new IndexRequest("foobar").id(Integer.toString(i)).source("{}", XContentType.JSON).setPipeline("_id")); - } - BulkResponse bulkResponse = client().bulk(bulkRequest).actionGet(); - assertThat(bulkResponse.getItems().length, equalTo(numDocs)); - for (int i = 0; i < numDocs; i++) { - String id = Integer.toString(i); - assertThat(bulkResponse.getItems()[i].getId(), equalTo(id)); - GetResponse getResponse = client().get(new GetRequest("foobar", id)).actionGet(); - // The expected result of async test processor: - assertThat(getResponse.getSource().get("foo"), equalTo("bar-" + id)); - // The expected result of sync test processor: - assertThat(getResponse.getSource().get("bar"), equalTo("baz-" + id)); - } - } - - public static class TestPlugin extends Plugin implements IngestPlugin { - - private ThreadPool threadPool; - - @Override - public Collection createComponents( - Client client, - ClusterService clusterService, - ThreadPool threadPool, - ResourceWatcherService resourceWatcherService, - ScriptService scriptService, - NamedXContentRegistry xContentRegistry, - Environment environment, - NodeEnvironment nodeEnvironment, - NamedWriteableRegistry namedWriteableRegistry, - IndexNameExpressionResolver expressionResolver, - Supplier repositoriesServiceSupplier, - Tracer tracer, - AllocationDeciders allocationDeciders - ) { - this.threadPool = threadPool; - return List.of(); - } - - @Override - public Map getProcessors(Processor.Parameters parameters) { - return Map.of("test-async", (factories, tag, description, config) -> new AbstractProcessor(tag, description) { - - @Override - public void execute(IngestDocument ingestDocument, BiConsumer handler) { - threadPool.generic().execute(() -> { - String id = (String) ingestDocument.getSourceAndMetadata().get("_id"); - if (usually()) { - try { - Thread.sleep(10); - } catch (InterruptedException e) { - // ignore - } - } - ingestDocument.setFieldValue("foo", "bar-" + id); - handler.accept(ingestDocument, null); - }); - } - - @Override - public String getType() { - return "test-async"; - } - - @Override - public boolean isAsync() { - return true; - } - - }, "test", (processorFactories, tag, description, config) -> new AbstractProcessor(tag, description) { - @Override - public IngestDocument execute(IngestDocument ingestDocument) throws Exception { - String id = (String) ingestDocument.getSourceAndMetadata().get("_id"); - ingestDocument.setFieldValue("bar", "baz-" + id); - return ingestDocument; - } - - @Override - public String getType() { - return "test"; - } - }); - } - } -} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java index 7dab09997618..3cd40347c8de 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import 
org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.core.Strings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.AggregationBuilders; @@ -184,7 +185,7 @@ public SearchTask createTask(long id, String type, String action, TaskId parentT private static List createRandomIndices(Client client) { int numIndices = randomIntBetween(3, 20); for (int i = 0; i < numIndices; i++) { - String indexName = formatted("index-%03d", i); + String indexName = Strings.format("index-%03d", i); assertAcked(client.admin().indices().prepareCreate(indexName).get()); client.prepareIndex(indexName).setSource("number", i, "foo", "bar").get(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java index 2d772e25131c..72fa2c033ace 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java @@ -10,7 +10,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.ScoreMode; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -654,8 +654,8 @@ public long bytesToPreallocate() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_EMPTY; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.ZERO; } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java index 0cfc783f1895..38cf9d57dba3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/support/WaitActiveShardCountIT.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentType; @@ -126,7 +127,7 @@ public void testReplicationWaitsForActiveShardCount() throws Exception { } private String source(String id, String nameValue) { - return formatted(""" + return Strings.format(""" { "type1" : { "id" : "%s", "name" : "%s" } } """, id, nameValue); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index a85713b8fe75..b4c4a778620a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -28,7 +28,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; -import 
org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -275,26 +274,16 @@ private IndexRoutingTable randomIndexRoutingTable(String index, String[] nodeIds int replicaCount = randomIntBetween(1, 10); Set availableNodeIds = Sets.newHashSet(nodeIds); for (int j = 0; j < replicaCount; j++) { - UnassignedInfo unassignedInfo = null; - if (randomInt(5) == 1) { - unassignedInfo = randomUnassignedInfo(randomAlphaOfLength(10)); - } + var newState = j == 0 ? ShardRoutingState.STARTED : randomFrom(ShardRoutingState.INITIALIZING, ShardRoutingState.STARTED); + var unassignedInfo = randomInt(5) == 1 && newState == ShardRoutingState.INITIALIZING + ? randomUnassignedInfo(randomAlphaOfLength(10)) + : null; if (availableNodeIds.isEmpty()) { break; } String nodeId = randomFrom(availableNodeIds); availableNodeIds.remove(nodeId); - indexShard.addShard( - TestShardRouting.newShardRouting( - index, - i, - nodeId, - null, - j == 0, - ShardRoutingState.fromValue((byte) randomIntBetween(2, 3)), - unassignedInfo - ) - ); + indexShard.addShard(TestShardRouting.newShardRouting(index, i, nodeId, null, j == 0, newState, unassignedInfo)); } builder.addIndexShard(indexShard); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java index 5f68717217a0..19f8a3c8bc87 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java @@ -13,17 +13,40 @@ import org.elasticsearch.action.admin.cluster.node.shutdown.PrevalidateNodeRemovalAction; import org.elasticsearch.action.admin.cluster.node.shutdown.PrevalidateNodeRemovalRequest; import org.elasticsearch.action.admin.cluster.node.shutdown.PrevalidateNodeRemovalResponse; +import org.elasticsearch.action.admin.cluster.node.shutdown.TransportPrevalidateShardPathAction; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.indices.store.IndicesStore; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.TransportService; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.CountDownLatch; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.oneOf; +import static org.hamcrest.Matchers.startsWith; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class PrevalidateNodeRemovalIT extends ESIntegTestCase { + @Override + protected Collection> nodePlugins() { + return Arrays.asList(MockTransportService.TestPlugin.class); + } + public void testNodeRemovalFromNonRedCluster() throws Exception { internalCluster().startMasterOnlyNode(); String node1 = 
internalCluster().startDataOnlyNode(); @@ -45,10 +68,13 @@ public void testNodeRemovalFromNonRedCluster() throws Exception { } PrevalidateNodeRemovalResponse resp = client().execute(PrevalidateNodeRemovalAction.INSTANCE, req.build()).get(); assertTrue(resp.getPrevalidation().isSafe()); + assertThat(resp.getPrevalidation().message(), equalTo("cluster status is not RED")); assertThat(resp.getPrevalidation().nodes().size(), equalTo(1)); NodesRemovalPrevalidation.NodeResult nodeResult = resp.getPrevalidation().nodes().get(0); assertNotNull(nodeResult); assertThat(nodeResult.name(), equalTo(nodeName)); + assertThat(nodeResult.result().reason(), equalTo(NodesRemovalPrevalidation.Reason.NO_PROBLEMS)); + assertThat(nodeResult.result().message(), equalTo("")); assertTrue(nodeResult.result().isSafe()); // Enforce a replica to get unassigned updateIndexSettings(indexName, Settings.builder().put("index.routing.allocation.require._name", node1)); @@ -56,25 +82,145 @@ public void testNodeRemovalFromNonRedCluster() throws Exception { PrevalidateNodeRemovalRequest req2 = PrevalidateNodeRemovalRequest.builder().setNames(node2).build(); PrevalidateNodeRemovalResponse resp2 = client().execute(PrevalidateNodeRemovalAction.INSTANCE, req2).get(); assertTrue(resp2.getPrevalidation().isSafe()); + assertThat(resp2.getPrevalidation().message(), equalTo("cluster status is not RED")); assertThat(resp2.getPrevalidation().nodes().size(), equalTo(1)); NodesRemovalPrevalidation.NodeResult nodeResult2 = resp2.getPrevalidation().nodes().get(0); assertNotNull(nodeResult2); assertThat(nodeResult2.name(), equalTo(node2)); assertTrue(nodeResult2.result().isSafe()); + assertThat(nodeResult2.result().reason(), equalTo(NodesRemovalPrevalidation.Reason.NO_PROBLEMS)); + assertThat(nodeResult2.result().message(), equalTo("")); } - public void testNodeRemovalFromRedCluster() throws Exception { + // Test that in case the nodes that are being prevalidated do not contain copies of any of the + // red shards, their removal is considered to be safe. + public void testNodeRemovalFromRedClusterWithNoLocalShardCopy() throws Exception { internalCluster().startMasterOnlyNode(); - String node1 = internalCluster().startDataOnlyNode(); - String node2 = internalCluster().startDataOnlyNode(); + String nodeWithIndex = internalCluster().startDataOnlyNode(); + List otherNodes = internalCluster().startDataOnlyNodes(randomIntBetween(1, 3)); // Create an index pinned to one node, and then stop that node so the index is RED. 
String indexName = "test-idx"; createIndex( indexName, - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put("index.routing.allocation.require._name", node1).build() + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put("index.routing.allocation.require._name", nodeWithIndex) + .build() ); ensureYellow(indexName); + internalCluster().stopNode(nodeWithIndex); + ensureRed(indexName); + String[] otherNodeNames = otherNodes.toArray(new String[otherNodes.size()]); + PrevalidateNodeRemovalRequest req = PrevalidateNodeRemovalRequest.builder().setNames(otherNodeNames).build(); + PrevalidateNodeRemovalResponse resp = client().execute(PrevalidateNodeRemovalAction.INSTANCE, req).get(); + assertTrue(resp.getPrevalidation().isSafe()); + assertThat(resp.getPrevalidation().message(), equalTo("")); + assertThat(resp.getPrevalidation().nodes().size(), equalTo(otherNodes.size())); + for (NodesRemovalPrevalidation.NodeResult nodeResult : resp.getPrevalidation().nodes()) { + assertThat(nodeResult.name(), oneOf(otherNodeNames)); + assertThat(nodeResult.result().reason(), equalTo(NodesRemovalPrevalidation.Reason.NO_RED_SHARDS_ON_NODE)); + assertTrue(nodeResult.result().isSafe()); + } + } + + public void testNodeRemovalFromRedClusterWithLocalShardCopy() throws Exception { + internalCluster().startMasterOnlyNode(); + String node1 = internalCluster().startDataOnlyNode(); + String node2 = internalCluster().startDataOnlyNode(); + String indexName = "test-idx"; + createIndex( + indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.routing.allocation.require._name", node1) + .build() + ); + ensureGreen(indexName); + // Prevent node1 from removing its local index shard copies upon removal, by blocking + // its ACTION_SHARD_EXISTS requests since after a relocation, the source first waits + // until the shard exists somewhere else, then it removes it locally. + final CountDownLatch shardActiveRequestSent = new CountDownLatch(1); + MockTransportService node1transport = (MockTransportService) internalCluster().getInstance(TransportService.class, node1); + TransportService node2transport = internalCluster().getInstance(TransportService.class, node2); + node1transport.addSendBehavior(node2transport, (connection, requestId, action, request, options) -> { + if (action.equals(IndicesStore.ACTION_SHARD_EXISTS)) { + shardActiveRequestSent.countDown(); + logger.info("prevent shard active request from being sent"); + throw new ConnectTransportException(connection.getNode(), "DISCONNECT: simulated"); + } + connection.sendRequest(requestId, action, request, options); + }); + logger.info("--> move shard from {} to {}, and wait for relocation to finish", node1, node2); + updateIndexSettings(indexName, Settings.builder().put("index.routing.allocation.require._name", node2)); + shardActiveRequestSent.await(); + ensureGreen(indexName); + // To ensure that the index doesn't get relocated back to node1 after stopping node2, we + // index a doc to make the index copy on node1 (in case not deleted after the relocation) stale. 
+ indexDoc(indexName, "some_id", "foo", "bar"); + internalCluster().stopNode(node2); + ensureRed(indexName); + // Ensure that node1 still has data for the unassigned index + NodeEnvironment nodeEnv = internalCluster().getInstance(NodeEnvironment.class, node1); + Index index = internalCluster().clusterService().state().metadata().index(indexName).getIndex(); + ShardPath shardPath = ShardPath.loadShardPath(logger, nodeEnv, new ShardId(index, 0), ""); + assertNotNull("local index shards not found", shardPath); + // Prevalidate removal of node1 + PrevalidateNodeRemovalRequest req = PrevalidateNodeRemovalRequest.builder().setNames(node1).build(); + PrevalidateNodeRemovalResponse resp = client().execute(PrevalidateNodeRemovalAction.INSTANCE, req).get(); + String node1Id = internalCluster().clusterService(node1).localNode().getId(); + assertFalse(resp.getPrevalidation().isSafe()); + assertThat(resp.getPrevalidation().message(), equalTo("removal of the following nodes might not be safe: [" + node1Id + "]")); + assertThat(resp.getPrevalidation().nodes().size(), equalTo(1)); + NodesRemovalPrevalidation.NodeResult nodeResult = resp.getPrevalidation().nodes().get(0); + assertThat(nodeResult.name(), equalTo(node1)); + assertFalse(nodeResult.result().isSafe()); + assertThat(nodeResult.result().reason(), equalTo(NodesRemovalPrevalidation.Reason.RED_SHARDS_ON_NODE)); + assertThat(nodeResult.result().message(), equalTo("node contains copies of the following red shards: [[" + indexName + "][0]]")); + } + + public void testNodeRemovalFromRedClusterWithTimeout() throws Exception { + internalCluster().startMasterOnlyNode(); + String node1 = internalCluster().startDataOnlyNode(); + String node2 = internalCluster().startDataOnlyNode(); + String indexName = "test-index"; + createIndex( + indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put("index.routing.allocation.require._name", node1) + .build() + ); + ensureGreen(indexName); + // make it red! 
internalCluster().stopNode(node1); + ensureRed(indexName); + MockTransportService node2TransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, node2); + node2TransportService.addRequestHandlingBehavior( + TransportPrevalidateShardPathAction.ACTION_NAME + "[n]", + (handler, request, channel, task) -> { logger.info("drop the check shards request"); } + ); + PrevalidateNodeRemovalRequest req = PrevalidateNodeRemovalRequest.builder() + .setNames(node2) + .build() + .timeout(TimeValue.timeValueSeconds(1)); + PrevalidateNodeRemovalResponse resp = client().execute(PrevalidateNodeRemovalAction.INSTANCE, req).get(); + assertFalse("prevalidation result should return false", resp.getPrevalidation().isSafe()); + String node2Id = internalCluster().clusterService(node2).localNode().getId(); + assertThat( + resp.getPrevalidation().message(), + equalTo("cannot prevalidate removal of nodes with the following IDs: [" + node2Id + "]") + ); + assertThat(resp.getPrevalidation().nodes().size(), equalTo(1)); + NodesRemovalPrevalidation.NodeResult nodeResult = resp.getPrevalidation().nodes().get(0); + assertThat(nodeResult.name(), equalTo(node2)); + assertFalse(nodeResult.result().isSafe()); + assertThat(nodeResult.result().message(), startsWith("failed contacting the node")); + assertThat(nodeResult.result().reason(), equalTo(NodesRemovalPrevalidation.Reason.UNABLE_TO_VERIFY)); + } + + private void ensureRed(String indexName) throws Exception { assertBusy(() -> { ClusterHealthResponse healthResponse = client().admin() .cluster() @@ -85,15 +231,5 @@ public void testNodeRemovalFromRedCluster() throws Exception { .actionGet(); assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.RED)); }); - // With a RED non-searchable-snapshot index, node removal is potentially unsafe - // since that node might have the last copy of the unassigned index. - PrevalidateNodeRemovalRequest req = PrevalidateNodeRemovalRequest.builder().setNames(node2).build(); - PrevalidateNodeRemovalResponse resp = client().execute(PrevalidateNodeRemovalAction.INSTANCE, req).get(); - assertFalse(resp.getPrevalidation().isSafe()); - assertThat(resp.getPrevalidation().message(), equalTo("cluster health is RED")); - assertThat(resp.getPrevalidation().nodes().size(), equalTo(1)); - NodesRemovalPrevalidation.NodeResult nodeResult = resp.getPrevalidation().nodes().get(0); - assertThat(nodeResult.name(), equalTo(node2)); - assertFalse(nodeResult.result().isSafe()); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateShardPathIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateShardPathIT.java new file mode 100644 index 000000000000..66bcf3ab3242 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateShardPathIT.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster; + +import org.apache.lucene.tests.util.LuceneTestCase; +import org.elasticsearch.action.admin.cluster.node.shutdown.NodePrevalidateShardPathResponse; +import org.elasticsearch.action.admin.cluster.node.shutdown.PrevalidateShardPathRequest; +import org.elasticsearch.action.admin.cluster.node.shutdown.PrevalidateShardPathResponse; +import org.elasticsearch.action.admin.cluster.node.shutdown.TransportPrevalidateShardPathAction; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.HashSet; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; + +/* + * We rely on the shard directory being deleted after the relocation. This removal sometimes fails + * with "java.io.IOException: access denied" when using WindowsFS which seems to be a known issue. + * See {@link FileSystemUtilsTests}. + */ +@LuceneTestCase.SuppressFileSystems(value = "WindowsFS") +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +public class PrevalidateShardPathIT extends ESIntegTestCase { + + public void testCheckShards() throws Exception { + internalCluster().startMasterOnlyNode(); + String node1 = internalCluster().startDataOnlyNode(); + String node2 = internalCluster().startDataOnlyNode(); + String indexName = "index1"; + int index1shards = randomIntBetween(1, 5); + createIndex("index1", Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, index1shards).build()); + ensureGreen(indexName); + var shardIds = clusterService().state() + .routingTable() + .allShards(indexName) + .stream() + .map(ShardRouting::shardId) + .collect(Collectors.toSet()); + String node1Id = internalCluster().clusterService(node1).localNode().getId(); + String node2Id = internalCluster().clusterService(node2).localNode().getId(); + Set shardIdsToCheck = new HashSet<>(shardIds); + boolean includeUnknownShardId = randomBoolean(); + if (includeUnknownShardId) { + shardIdsToCheck.add(new ShardId(randomAlphaOfLength(10), UUIDs.randomBase64UUID(), randomIntBetween(0, 10))); + } + PrevalidateShardPathRequest req = new PrevalidateShardPathRequest(shardIdsToCheck, node1Id, node2Id); + PrevalidateShardPathResponse resp = client().execute(TransportPrevalidateShardPathAction.TYPE, req).get(); + var nodeResponses = resp.getNodes(); + assertThat(nodeResponses.size(), equalTo(2)); + assertThat(nodeResponses.stream().map(r -> r.getNode().getId()).collect(Collectors.toSet()), equalTo(Set.of(node1Id, node2Id))); + assertTrue(resp.failures().isEmpty()); + for (NodePrevalidateShardPathResponse nodeResponse : nodeResponses) { + assertThat(nodeResponse.getShardIds(), equalTo(shardIds)); + } + // Check that after relocation the source node doesn't have the shard path + String node3 = internalCluster().startDataOnlyNode(); + updateIndexSettings(indexName, Settings.builder().put("index.routing.allocation.exclude._name", node2)); + ensureGreen(indexName); + assertBusy(() -> { + try { + // The excluded node should eventually delete the shards + PrevalidateShardPathRequest req2 = new PrevalidateShardPathRequest(shardIdsToCheck, node2Id); + PrevalidateShardPathResponse resp2 = client().execute(TransportPrevalidateShardPathAction.TYPE, req2).get(); + 
assertThat(resp2.getNodes().size(), equalTo(1)); + assertThat(resp2.getNodes().get(0).getNode().getId(), equalTo(node2Id)); + assertTrue("There should be no failures in the response", resp.failures().isEmpty()); + assertTrue("The relocation source node should have removed the shard(s)", resp2.getNodes().get(0).getShardIds().isEmpty()); + } catch (AssertionError e) { + // Removal of shards which are no longer allocated to the node is attempted on every cluster state change in IndicesStore. + // If for whatever reason the removal is not triggered (e.g. not enough nodes reported that the shards are active) or it + // temporarily failed to clean up the shard folder, we need to trigger another cluster state change for this removal to + // finally succeed. + updateIndexSettings( + indexName, + Settings.builder().put("index.routing.allocation.exclude.name", "non-existent" + randomAlphaOfLength(5)) + ); + throw e; + } + }); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java index 0be8f79a3e1c..8ed513a4b4b4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java @@ -8,7 +8,7 @@ package org.elasticsearch.cluster; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.action.support.IndicesOptions; @@ -19,7 +19,7 @@ import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; @@ -42,6 +42,7 @@ import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.junit.Before; @@ -50,6 +51,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; @@ -391,8 +393,8 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.CURRENT; } @Override @@ -401,8 +403,8 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder; + public Iterator toXContentChunked(ToXContent.Params params) { + return Collections.emptyIterator(); } static NamedDiff readDiffFrom(StreamInput in) throws IOException { @@ -443,7 +445,7 @@ public Collection createComponents( final IndexNameExpressionResolver expressionResolver, final Supplier repositoriesServiceSupplier, Tracer tracer, - 
AllocationDeciders allocationDeciders + AllocationService allocationService ) { clusterService.addListener(event -> { final ClusterState state = event.state(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index 7f2c44faf912..a0afbf65300f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -35,7 +35,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.Priority; import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.TimeValue; @@ -402,7 +401,7 @@ public void testRerouteExplain() { assertThat(explanation.decisions().type(), equalTo(Decision.Type.YES)); } - public void testMessageLogging() throws Exception { + public void testMessageLogging() { final Settings settings = Settings.builder() .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NONE.name()) .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE.name()) @@ -431,10 +430,7 @@ public void testMessageLogging() throws Exception { .execute() .actionGet(); - Logger actionLogger = LogManager.getLogger(TransportClusterRerouteAction.class); - MockLogAppender dryRunMockLog = new MockLogAppender(); - dryRunMockLog.start(); dryRunMockLog.addExpectation( new MockLogAppender.UnseenEventExpectation( "no completed message logged on dry run", @@ -443,28 +439,26 @@ public void testMessageLogging() throws Exception { "allocated an empty primary*" ) ); - Loggers.addAppender(actionLogger, dryRunMockLog); - - AllocationCommand dryRunAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true); - ClusterRerouteResponse dryRunResponse = client().admin() - .cluster() - .prepareReroute() - .setExplain(randomBoolean()) - .setDryRun(true) - .add(dryRunAllocation) - .execute() - .actionGet(); - - // during a dry run, messages exist but are not logged or exposed - assertThat(dryRunResponse.getExplanations().getYesDecisionMessages(), hasSize(1)); - assertThat(dryRunResponse.getExplanations().getYesDecisionMessages().get(0), containsString("allocated an empty primary")); - dryRunMockLog.assertAllExpectationsMatched(); - dryRunMockLog.stop(); - Loggers.removeAppender(actionLogger, dryRunMockLog); + try (var ignored = dryRunMockLog.capturing(TransportClusterRerouteAction.class)) { + AllocationCommand dryRunAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true); + ClusterRerouteResponse dryRunResponse = client().admin() + .cluster() + .prepareReroute() + .setExplain(randomBoolean()) + .setDryRun(true) + .add(dryRunAllocation) + .execute() + .actionGet(); + + // during a dry run, messages exist but are not logged or exposed + assertThat(dryRunResponse.getExplanations().getYesDecisionMessages(), hasSize(1)); + assertThat(dryRunResponse.getExplanations().getYesDecisionMessages().get(0), containsString("allocated an empty primary")); + + dryRunMockLog.assertAllExpectationsMatched(); + } MockLogAppender allocateMockLog = new MockLogAppender(); - 
allocateMockLog.start(); allocateMockLog.addExpectation( new MockLogAppender.SeenEventExpectation( "message for first allocate empty primary", @@ -481,26 +475,25 @@ public void testMessageLogging() throws Exception { "allocated an empty primary*" + nodeName2 + "*" ) ); - Loggers.addAppender(actionLogger, allocateMockLog); - - AllocationCommand yesDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true); - AllocationCommand noDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand("noexist", 1, nodeName2, true); - ClusterRerouteResponse response = client().admin() - .cluster() - .prepareReroute() - .setExplain(true) // so we get a NO decision back rather than an exception - .add(yesDecisionAllocation) - .add(noDecisionAllocation) - .execute() - .actionGet(); - - assertThat(response.getExplanations().getYesDecisionMessages(), hasSize(1)); - assertThat(response.getExplanations().getYesDecisionMessages().get(0), containsString("allocated an empty primary")); - assertThat(response.getExplanations().getYesDecisionMessages().get(0), containsString(nodeName1)); - - allocateMockLog.assertAllExpectationsMatched(); - allocateMockLog.stop(); - Loggers.removeAppender(actionLogger, allocateMockLog); + try (var ignored = allocateMockLog.capturing(TransportClusterRerouteAction.class)) { + + AllocationCommand yesDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true); + AllocationCommand noDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand("noexist", 1, nodeName2, true); + ClusterRerouteResponse response = client().admin() + .cluster() + .prepareReroute() + .setExplain(true) // so we get a NO decision back rather than an exception + .add(yesDecisionAllocation) + .add(noDecisionAllocation) + .execute() + .actionGet(); + + assertThat(response.getExplanations().getYesDecisionMessages(), hasSize(1)); + assertThat(response.getExplanations().getYesDecisionMessages().get(0), containsString("allocated an empty primary")); + assertThat(response.getExplanations().getYesDecisionMessages().get(0), containsString(nodeName1)); + + allocateMockLog.assertAllExpectationsMatched(); + } } public void testClusterRerouteWithBlocks() { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/InitialClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/InitialClusterStateIT.java new file mode 100644 index 000000000000..6fddb755a6b2 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/InitialClusterStateIT.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.coordination; + +import org.elasticsearch.action.main.MainAction; +import org.elasticsearch.action.main.MainRequest; +import org.elasticsearch.action.main.MainResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalTestCluster; + +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.node.Node.INITIAL_STATE_TIMEOUT_SETTING; + +@ESIntegTestCase.ClusterScope(numDataNodes = 0, autoManageMasterNodes = false) +public class InitialClusterStateIT extends ESIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal, otherSettings)) + .put(INITIAL_STATE_TIMEOUT_SETTING.getKey(), TimeValue.ZERO) + .build(); + } + + private static void assertClusterUuid(boolean expectCommitted, String expectedValue) { + for (String nodeName : internalCluster().getNodeNames()) { + final Metadata metadata = client(nodeName).admin().cluster().prepareState().setLocal(true).get().getState().metadata(); + assertEquals(expectCommitted, metadata.clusterUUIDCommitted()); + assertEquals(expectedValue, metadata.clusterUUID()); + + final MainResponse mainResponse = PlainActionFuture.get( + fut -> client(nodeName).execute(MainAction.INSTANCE, new MainRequest(), fut), + 10, + TimeUnit.SECONDS + ); + assertEquals(expectedValue, mainResponse.getClusterUuid()); + } + } + + public void testClusterUuidInInitialClusterState() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + + try { + internalCluster().startDataOnlyNode(); + assertClusterUuid(false, Metadata.UNKNOWN_CLUSTER_UUID); + + internalCluster().startMasterOnlyNode(); + internalCluster().validateClusterFormed(); + + final var clusterUUID = internalCluster().getCurrentMasterNodeInstance(ClusterService.class).state().metadata().clusterUUID(); + assertNotEquals(Metadata.UNKNOWN_CLUSTER_UUID, clusterUUID); + assertClusterUuid(true, clusterUUID); + + internalCluster().stopCurrentMasterNode(); + assertClusterUuid(true, clusterUUID); + + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public boolean validateClusterForming() { + return false; + } + }); + assertClusterUuid(true, clusterUUID); + } finally { + while (true) { + var node = internalCluster().getRandomNodeName(); + if (node == null) { + break; + } + assertTrue(internalCluster().stopNode(node)); + } + } + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java index 856bebba1f51..f1c816fe4a22 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java @@ -20,6 +20,7 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.TestShardRoutingRoleStrategies; import org.elasticsearch.cluster.block.ClusterBlocks; import 
org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; @@ -102,7 +103,10 @@ public ClusterState execute(ClusterState currentState) { builder.blocks(ClusterBlocks.builder().blocks(currentState.blocks()).removeIndexBlocks(index)); ClusterState updatedState = builder.build(); - RoutingTable.Builder routingTable = RoutingTable.builder(updatedState.routingTable()); + RoutingTable.Builder routingTable = RoutingTable.builder( + TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY, + updatedState.routingTable() + ); routingTable.addAsRecovery(updatedState.metadata().index(index)); updatedState = ClusterState.builder(updatedState).routingTable(routingTable.build()).build(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java index eae2c7d37e35..51348a4844ca 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java @@ -11,7 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Setting; @@ -77,7 +77,7 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationDeciders allocationDeciders + AllocationService allocationService ) { clusterService.getClusterSettings() .addSettingsUpdateConsumer( @@ -97,7 +97,7 @@ public Collection createComponents( expressionResolver, repositoriesServiceSupplier, tracer, - allocationDeciders + allocationService ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java index 7bf202d64fbe..8172316e8a7b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java @@ -15,7 +15,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESIntegTestCase; -import java.util.Collections; import java.util.List; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -191,7 +190,6 @@ private void indexRandomData() throws Exception { private String findNodeWithShard() { ClusterState state = client().admin().cluster().prepareState().get().getState(); List startedShards = RoutingNodesHelper.shardsWithState(state.getRoutingNodes(), ShardRoutingState.STARTED); - Collections.shuffle(startedShards, random()); - return state.nodes().get(startedShards.get(0).currentNodeId()).getName(); + return state.nodes().get(randomFrom(startedShards).currentNodeId()).getName(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java new file mode 100644 
index 000000000000..6eadcd9ee2cb --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java @@ -0,0 +1,677 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.routing; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.admin.indices.refresh.TransportUnpromotableShardRefreshAction; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand; +import org.elasticsearch.cluster.routing.allocation.command.CancelAllocationCommand; +import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.index.engine.EngineTestCase; +import org.elasticsearch.index.engine.InternalEngine; +import org.elasticsearch.index.engine.NoOpEngine; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.plugins.ClusterPlugin; +import org.elasticsearch.plugins.EnginePlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.XContentTestUtils; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.anEmptyMap; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static 
org.hamcrest.Matchers.startsWith;
+
+@SuppressWarnings("resource")
+public class ShardRoutingRoleIT extends ESIntegTestCase {
+
+    private static final Logger logger = LogManager.getLogger(ShardRoutingRoleIT.class);
+
+    public static class TestPlugin extends Plugin implements ClusterPlugin, EnginePlugin {
+
+        volatile int numIndexingCopies = 1;
+        static final String NODE_ATTR_UNPROMOTABLE_ONLY = "unpromotableonly";
+
+        @Override
+        public ShardRoutingRoleStrategy getShardRoutingRoleStrategy() {
+            return new ShardRoutingRoleStrategy() {
+                @Override
+                public ShardRouting.Role newReplicaRole() {
+                    return ShardRouting.Role.SEARCH_ONLY;
+                }
+
+                @Override
+                public ShardRouting.Role newEmptyRole(int copyIndex) {
+                    assert 0 < numIndexingCopies;
+                    return copyIndex < numIndexingCopies ? ShardRouting.Role.INDEX_ONLY : ShardRouting.Role.SEARCH_ONLY;
+                }
+            };
+        }
+
+        @Override
+        public Collection<AllocationDecider> createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) {
+            return List.of(new AllocationDecider() {
+                @Override
+                public Decision canForceAllocatePrimary(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+                    // once a primary is cancelled it _stays_ cancelled
+                    if (shardRouting.unassignedInfo().getReason() == UnassignedInfo.Reason.REROUTE_CANCELLED) {
+                        return Decision.NO;
+                    }
+                    return super.canForceAllocatePrimary(shardRouting, node, allocation);
+                }
+
+                @Override
+                public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+                    var nodesWithUnpromotableOnly = allocation.getClusterState()
+                        .nodes()
+                        .stream()
+                        .filter(n -> Objects.equals("true", n.getAttributes().get(NODE_ATTR_UNPROMOTABLE_ONLY)))
+                        .map(DiscoveryNode::getName)
+                        .collect(Collectors.toUnmodifiableSet());
+                    if (nodesWithUnpromotableOnly.isEmpty() == false) {
+                        if (nodesWithUnpromotableOnly.contains(node.node().getName())) {
+                            if (shardRouting.isPromotableToPrimary()) {
+                                return allocation.decision(
+                                    Decision.NO,
+                                    "test",
+                                    "shard is promotable to primary so may not be assigned to [" + node.node().getName() + "]"
+                                );
+                            }
+                        } else {
+                            if (shardRouting.isPromotableToPrimary() == false) {
+                                return allocation.decision(
+                                    Decision.NO,
+                                    "test",
+                                    "shard is not promotable to primary so may not be assigned to [" + node.node().getName() + "]"
+                                );
+                            }
+                        }
+                    }
+                    return Decision.YES;
+                }
+            });
+        }
+
+        @Override
+        public Optional<EngineFactory> getEngineFactory(IndexSettings indexSettings) {
+            return Optional.of(config -> {
+                if (config.isPromotableToPrimary()) {
+                    return new InternalEngine(config);
+                } else {
+                    try {
+                        config.getStore().createEmpty();
+                    } catch (IOException e) {
+                        logger.error("Error creating empty store", e);
+                        throw new RuntimeException(e);
+                    }
+
+                    return new NoOpEngine(EngineTestCase.copy(config, () -> -1L));
+                }
+            });
+        }
+    }
+
+    @Override
+    protected boolean addMockInternalEngine() {
+        return false;
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return CollectionUtils.concatLists(List.of(MockTransportService.TestPlugin.class, TestPlugin.class), super.nodePlugins());
+    }
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
+        return Settings.builder()
+            .put(super.nodeSettings(nodeOrdinal, otherSettings))
+            .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)
+            .build();
+    }
+
+    private static TestPlugin getMasterNodePlugin() {
+        return internalCluster().getCurrentMasterNodeInstance(PluginsService.class)
+            .filterPlugins(TestPlugin.class)
+
.stream() + .findFirst() + .orElseThrow(() -> new AssertionError("no plugin")); + } + + private static final String INDEX_NAME = "test"; + + private static class RoutingTableWatcher implements ClusterStateListener { + + final int numShards = between(1, 3); + int numIndexingCopies = between(1, 2); + int numReplicas = between(numIndexingCopies - 1, 3); + + @Override + public void clusterChanged(ClusterChangedEvent event) { + assertRoles(event.state().getRoutingTable().index(INDEX_NAME)); + } + + private void assertRoles(IndexRoutingTable indexRoutingTable) { + if (indexRoutingTable == null) { + return; + } + var message = indexRoutingTable.prettyPrint(); + assertEquals("number_of_shards: " + message, numShards, indexRoutingTable.size()); + for (int shardId = 0; shardId < numShards; shardId++) { + final var indexShardRoutingTable = indexRoutingTable.shard(shardId); + assertEquals("number_of_replicas: " + message, numReplicas + 1, indexShardRoutingTable.size()); + var indexingShards = 0; + for (int shardCopy = 0; shardCopy < numReplicas + 1; shardCopy++) { + final var shardRouting = indexShardRoutingTable.shard(shardCopy); + switch (shardRouting.role()) { + case INDEX_ONLY -> indexingShards += 1; + case SEARCH_ONLY -> assertFalse(shardRouting.primary()); + case DEFAULT -> fail("should not have any DEFAULT shards"); + } + if (shardRouting.relocating()) { + assertEquals("role on relocation: " + message, shardRouting.role(), shardRouting.getTargetRelocatingShard().role()); + } + } + assertEquals("number_of_indexing_shards: " + message, Math.min(numIndexingCopies, numReplicas + 1), indexingShards); + } + } + + Settings getIndexSettings() { + logger.info("--> numShards={}, numReplicas={}", numShards, numReplicas); + return Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build(); + } + } + + @SuppressWarnings("unchecked") + private static void assertRolesInRoutingTableXContent(ClusterState state) { + try { + final var routingTable = (Map) XContentTestUtils.convertToMap(state).get("routing_table"); + final var routingTableIndices = (Map) routingTable.get("indices"); + final var routingTableIndex = (Map) routingTableIndices.get("test"); + final var routingTableShards = (Map) routingTableIndex.get("shards"); + for (final var routingTableShardValue : routingTableShards.values()) { + for (Object routingTableShardCopy : (List) routingTableShardValue) { + final var routingTableShard = (Map) routingTableShardCopy; + assertNotNull(ShardRouting.Role.valueOf(routingTableShard.get("role"))); + } + } + } catch (IOException e) { + throw new AssertionError("unexpected", e); + } + } + + private static void installMockTransportVerifications(RoutingTableWatcher routingTableWatcher) { + for (var transportService : internalCluster().getInstances(TransportService.class)) { + MockTransportService mockTransportService = (MockTransportService) transportService; + mockTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (routingTableWatcher.numIndexingCopies == 1) { + assertThat("no recovery action should be exchanged", action, not(startsWith("internal:index/shard/recovery/"))); + assertThat("no replicated action should be exchanged", action, not(containsString("[r]"))); + } + connection.sendRequest(requestId, action, request, options); + }); + mockTransportService.addRequestHandlingBehavior( + TransportUnpromotableShardRefreshAction.NAME, + (handler, request, channel, task) -> { + // Skip 
handling the request and send an immediate empty response + channel.sendResponse(ActionResponse.Empty.INSTANCE); + } + ); + } + } + + public void testShardCreation() throws Exception { + var routingTableWatcher = new RoutingTableWatcher(); + + var numDataNodes = routingTableWatcher.numReplicas + 2; + internalCluster().ensureAtLeastNumDataNodes(numDataNodes); + installMockTransportVerifications(routingTableWatcher); + getMasterNodePlugin().numIndexingCopies = routingTableWatcher.numIndexingCopies; + + final var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + try { + // verify the correct number of shard copies of each role as the routing table evolves + masterClusterService.addListener(routingTableWatcher); + + createIndex(INDEX_NAME, routingTableWatcher.getIndexSettings()); + + final var clusterState = client().admin().cluster().prepareState().clear().setRoutingTable(true).get().getState(); + + // verify non-DEFAULT roles reported in cluster state XContent + assertRolesInRoutingTableXContent(clusterState); + + // verify non-DEFAULT roles reported in cluster state string representation + var stateAsString = clusterState.toString(); + assertThat(stateAsString, containsString("[" + ShardRouting.Role.INDEX_ONLY + "]")); + assertThat(stateAsString, not(containsString("[" + ShardRouting.Role.DEFAULT + "]"))); + if (routingTableWatcher.numReplicas + 1 > routingTableWatcher.numIndexingCopies) { + assertThat(stateAsString, containsString("[" + ShardRouting.Role.SEARCH_ONLY + "]")); + } + + ensureGreen(INDEX_NAME); + assertEngineTypes(); + + // new replicas get the SEARCH_ONLY role + routingTableWatcher.numReplicas += 1; + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, routingTableWatcher.numReplicas)) + ); + + ensureGreen(INDEX_NAME); + assertEngineTypes(); + indexRandom(randomBoolean(), INDEX_NAME, randomIntBetween(50, 100)); + + // removing replicas drops SEARCH_ONLY copies first + while (routingTableWatcher.numReplicas > 0) { + routingTableWatcher.numReplicas -= 1; + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, routingTableWatcher.numReplicas)) + ); + } + + // restoring the index from a snapshot may change the number of indexing replicas because the routing table is created afresh + var repoPath = randomRepoPath(); + assertAcked( + client().admin() + .cluster() + .preparePutRepository("repo") + .setType("fs") + .setSettings(Settings.builder().put("location", repoPath)) + ); + + assertEquals( + SnapshotState.SUCCESS, + client().admin().cluster().prepareCreateSnapshot("repo", "snap").setWaitForCompletion(true).get().getSnapshotInfo().state() + ); + + if (randomBoolean()) { + assertAcked(client().admin().indices().prepareDelete(INDEX_NAME)); + } else { + assertAcked(client().admin().indices().prepareClose(INDEX_NAME)); + ensureGreen(INDEX_NAME); + } + + routingTableWatcher.numReplicas = between(0, numDataNodes - 1); + routingTableWatcher.numIndexingCopies = between(1, 2); + getMasterNodePlugin().numIndexingCopies = routingTableWatcher.numIndexingCopies; + + assertEquals( + 0, + client().admin() + .cluster() + .prepareRestoreSnapshot("repo", "snap") + .setIndices(INDEX_NAME) + .setIndexSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, routingTableWatcher.numReplicas)) + .setWaitForCompletion(true) + .get() + 
.getRestoreInfo() + .failedShards() + ); + ensureGreen(INDEX_NAME); + assertEngineTypes(); + } finally { + masterClusterService.removeListener(routingTableWatcher); + } + } + + private void assertEngineTypes() { + for (IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { + for (IndexService indexService : indicesService) { + for (IndexShard indexShard : indexService) { + final var engine = indexShard.getEngineOrNull(); + assertNotNull(engine); + if (indexShard.routingEntry().isPromotableToPrimary() + && indexShard.indexSettings().getIndexMetadata().getState() == IndexMetadata.State.OPEN) { + assertThat(engine, instanceOf(InternalEngine.class)); + } else { + assertThat(engine, instanceOf(NoOpEngine.class)); + } + } + } + } + } + + public void testRelocation() { + var routingTableWatcher = new RoutingTableWatcher(); + + var numDataNodes = routingTableWatcher.numReplicas + 2; + internalCluster().ensureAtLeastNumDataNodes(numDataNodes); + getMasterNodePlugin().numIndexingCopies = routingTableWatcher.numIndexingCopies; + + final var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + try { + // verify the correct number of shard copies of each role as the routing table evolves + masterClusterService.addListener(routingTableWatcher); + + createIndex(INDEX_NAME, routingTableWatcher.getIndexSettings()); + + for (String nodeName : internalCluster().getNodeNames()) { + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "._name", nodeName)) + ); + ensureGreen(INDEX_NAME); + } + } finally { + masterClusterService.removeListener(routingTableWatcher); + } + } + + public void testPromotion() { + var routingTableWatcher = new RoutingTableWatcher(); + + var numDataNodes = routingTableWatcher.numReplicas + 2; + internalCluster().ensureAtLeastNumDataNodes(numDataNodes); + installMockTransportVerifications(routingTableWatcher); + getMasterNodePlugin().numIndexingCopies = routingTableWatcher.numIndexingCopies; + + final var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + try { + // verify the correct number of shard copies of each role as the routing table evolves + masterClusterService.addListener(routingTableWatcher); + + createIndex(INDEX_NAME, routingTableWatcher.getIndexSettings()); + ensureGreen(INDEX_NAME); + assertEngineTypes(); + + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._name", "not-a-node")) + ); + + AllocationCommand cancelPrimaryCommand; + while ((cancelPrimaryCommand = getCancelPrimaryCommand()) != null) { + client().admin().cluster().prepareReroute().add(cancelPrimaryCommand).get(); + } + } finally { + masterClusterService.removeListener(routingTableWatcher); + } + } + + @Nullable + public AllocationCommand getCancelPrimaryCommand() { + final var indexRoutingTable = client().admin() + .cluster() + .prepareState() + .clear() + .setRoutingTable(true) + .get() + .getState() + .routingTable() + .index(INDEX_NAME); + for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { + final var indexShardRoutingTable = indexRoutingTable.shard(shardId); + if (indexShardRoutingTable.primaryShard().assignedToNode()) { + return new CancelAllocationCommand(INDEX_NAME, shardId, indexShardRoutingTable.primaryShard().currentNodeId(), true); 
+ } else { + assertThat(indexShardRoutingTable.assignedShards(), empty()); + for (int copy = 0; copy < indexShardRoutingTable.size(); copy++) { + final var shardRouting = indexShardRoutingTable.shard(copy); + assertEquals( + shardRouting.role().isPromotableToPrimary() + ? UnassignedInfo.Reason.REROUTE_CANCELLED + : UnassignedInfo.Reason.UNPROMOTABLE_REPLICA, + shardRouting.unassignedInfo().getReason() + ); + } + } + } + return null; + } + + public void testSearchRouting() throws Exception { + + var routingTableWatcher = new RoutingTableWatcher(); + routingTableWatcher.numReplicas = Math.max(1, routingTableWatcher.numReplicas); + routingTableWatcher.numIndexingCopies = Math.min(routingTableWatcher.numIndexingCopies, routingTableWatcher.numReplicas); + getMasterNodePlugin().numIndexingCopies = routingTableWatcher.numIndexingCopies; + + internalCluster().ensureAtLeastNumDataNodes(routingTableWatcher.numReplicas + 1); + installMockTransportVerifications(routingTableWatcher); + + final var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + try { + // verify the correct number of shard copies of each role as the routing table evolves + masterClusterService.addListener(routingTableWatcher); + + createIndex(INDEX_NAME, routingTableWatcher.getIndexSettings()); + indexRandom(randomBoolean(), INDEX_NAME, randomIntBetween(50, 100)); + ensureGreen(INDEX_NAME); + assertEngineTypes(); + + final var searchShardProfileKeys = new HashSet(); + final var indexRoutingTable = client().admin() + .cluster() + .prepareState() + .clear() + .setRoutingTable(true) + .get() + .getState() + .routingTable() + .index(INDEX_NAME); + + for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { + final var indexShardRoutingTable = indexRoutingTable.shard(shardId); + for (int shardCopy = 0; shardCopy < indexShardRoutingTable.size(); shardCopy++) { + final var shardRouting = indexShardRoutingTable.shard(shardCopy); + if (shardRouting.role() == ShardRouting.Role.SEARCH_ONLY) { + searchShardProfileKeys.add("[" + shardRouting.currentNodeId() + "][" + INDEX_NAME + "][" + shardId + "]"); + } + } + } + + for (int i = 0; i < 10; i++) { + final var search = client().prepareSearch(INDEX_NAME).setProfile(true); + switch (randomIntBetween(0, 2)) { + case 0 -> search.setRouting(randomAlphaOfLength(10)); + case 1 -> search.setPreference(randomSearchPreference(routingTableWatcher.numShards, internalCluster().getNodeNames())); + default -> { + // do nothing + } + } + final var profileResults = search.get().getProfileResults(); + assertThat(profileResults, not(anEmptyMap())); + for (final var searchShardProfileKey : profileResults.keySet()) { + assertThat(searchShardProfileKeys, hasItem(searchShardProfileKey)); + } + } + + // TODO also verify PIT routing + // TODO also verify the search-shards API + } finally { + masterClusterService.removeListener(routingTableWatcher); + } + } + + private String randomSearchPreference(int numShards, String... 
nodeIds) { + final var preference = randomFrom(Preference.SHARDS, Preference.PREFER_NODES, Preference.LOCAL); + // ONLY_LOCAL and ONLY_NODES omitted here because they may yield no shard copies which causes the search to fail + // TODO add support for ONLY_LOCAL and ONLY_NODES too + return switch (preference) { + case LOCAL, ONLY_LOCAL -> preference.type(); + case PREFER_NODES, ONLY_NODES -> preference.type() + ":" + String.join(",", randomNonEmptySubsetOf(Arrays.asList(nodeIds))); + case SHARDS -> preference.type() + + ":" + + String.join( + ",", + randomSubsetOf(between(1, numShards), IntStream.range(0, numShards).mapToObj(Integer::toString).toList()) + ); + }; + } + + public void testClosedIndex() { + var routingTableWatcher = new RoutingTableWatcher(); + + var numDataNodes = routingTableWatcher.numReplicas + 2; + internalCluster().ensureAtLeastNumDataNodes(numDataNodes); + installMockTransportVerifications(routingTableWatcher); + getMasterNodePlugin().numIndexingCopies = routingTableWatcher.numIndexingCopies; + + final var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + try { + // verify the correct number of shard copies of each role as the routing table evolves + masterClusterService.addListener(routingTableWatcher); + + createIndex(INDEX_NAME, routingTableWatcher.getIndexSettings()); + ensureGreen(INDEX_NAME); + assertEngineTypes(); + + assertAcked(client().admin().indices().prepareClose(INDEX_NAME)); + ensureGreen(INDEX_NAME); + assertEngineTypes(); + } finally { + masterClusterService.removeListener(routingTableWatcher); + } + } + + public void testRefreshOfUnpromotableShards() throws Exception { + var routingTableWatcher = new RoutingTableWatcher(); + + var numDataNodes = routingTableWatcher.numReplicas + 2; + internalCluster().ensureAtLeastNumDataNodes(numDataNodes); + installMockTransportVerifications(routingTableWatcher); + getMasterNodePlugin().numIndexingCopies = routingTableWatcher.numIndexingCopies; + final AtomicInteger unpromotableRefreshActions = new AtomicInteger(0); + + for (var transportService : internalCluster().getInstances(TransportService.class)) { + MockTransportService mockTransportService = (MockTransportService) transportService; + mockTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (action.startsWith(TransportUnpromotableShardRefreshAction.NAME)) { + unpromotableRefreshActions.incrementAndGet(); + } + connection.sendRequest(requestId, action, request, options); + }); + } + + final var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + try { + // verify the correct number of shard copies of each role as the routing table evolves + masterClusterService.addListener(routingTableWatcher); + + createIndex( + INDEX_NAME, + Settings.builder() + .put(routingTableWatcher.getIndexSettings()) + .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), false) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1) + .build() + ); + ensureGreen(INDEX_NAME); + assertEngineTypes(); + + indexRandom(true, INDEX_NAME, randomIntBetween(1, 10)); + + int singleRefreshExpectedUnpromotableActions = (routingTableWatcher.numReplicas - (routingTableWatcher.numIndexingCopies - 1)) + * routingTableWatcher.numShards; + if (singleRefreshExpectedUnpromotableActions > 0) { + assertThat( + "at least one refresh is expected where each primary sends an unpromotable refresh to each unpromotable replica shard.", + unpromotableRefreshActions.get(), + 
greaterThanOrEqualTo(singleRefreshExpectedUnpromotableActions) + ); + assertThat( + "the number of unpromotable refreshes seen is expected to be a multiple of the occurred refreshes", + unpromotableRefreshActions.get() % singleRefreshExpectedUnpromotableActions, + is(equalTo(0)) + ); + } + } finally { + masterClusterService.removeListener(routingTableWatcher); + } + } + + public void testNodesWithUnpromotableShardsNeverGetReplicationActions() throws Exception { + var routingTableWatcher = new RoutingTableWatcher(); + var additionalNumberOfNodesWithUnpromotableShards = randomIntBetween(1, 3); + routingTableWatcher.numReplicas = routingTableWatcher.numIndexingCopies + additionalNumberOfNodesWithUnpromotableShards - 1; + internalCluster().ensureAtLeastNumDataNodes(routingTableWatcher.numIndexingCopies + 1); + final List nodesWithUnpromotableOnly = internalCluster().startDataOnlyNodes( + additionalNumberOfNodesWithUnpromotableShards, + Settings.builder().put("node.attr." + TestPlugin.NODE_ATTR_UNPROMOTABLE_ONLY, "true").build() + ); + installMockTransportVerifications(routingTableWatcher); + getMasterNodePlugin().numIndexingCopies = routingTableWatcher.numIndexingCopies; + + for (var transportService : internalCluster().getInstances(TransportService.class)) { + MockTransportService mockTransportService = (MockTransportService) transportService; + mockTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (nodesWithUnpromotableOnly.contains(connection.getNode().getName())) { + assertThat(action, not(containsString("[r]"))); + } + connection.sendRequest(requestId, action, request, options); + }); + } + + final var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + try { + // verify the correct number of shard copies of each role as the routing table evolves + masterClusterService.addListener(routingTableWatcher); + createIndex(INDEX_NAME, routingTableWatcher.getIndexSettings()); + ensureGreen(INDEX_NAME); + indexRandom(randomBoolean(), INDEX_NAME, randomIntBetween(50, 100)); + } finally { + masterClusterService.removeListener(routingTableWatcher); + } + } + +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/service/ClusterServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/service/ClusterServiceIT.java index 064949bf4bae..24950fe160e8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/service/ClusterServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/service/ClusterServiceIT.java @@ -17,6 +17,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.threadpool.ThreadPool; import java.util.Arrays; import java.util.HashSet; @@ -25,6 +26,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.StreamSupport; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -366,7 +368,7 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) assertThat(response.pendingTasks().size(), greaterThanOrEqualTo(10)); assertThat(response.pendingTasks().get(0).getSource().string(), equalTo("1")); assertThat(response.pendingTasks().get(0).isExecuting(), equalTo(true)); - for (PendingClusterTask task : response) { + for 
(PendingClusterTask task : response.pendingTasks()) { controlSources.remove(task.getSource().string()); } assertTrue(controlSources.isEmpty()); @@ -414,11 +416,7 @@ public void onFailure(Exception e) { }); } - final var startNanoTime = System.nanoTime(); - while (TimeUnit.MILLISECONDS.convert(System.nanoTime() - startNanoTime, TimeUnit.NANOSECONDS) <= 0) { - // noinspection BusyWait - Thread.sleep(100); - } + waitForTimeToElapse(); pendingClusterTasks = clusterService.getMasterService().pendingTasks(); assertThat(pendingClusterTasks.size(), greaterThanOrEqualTo(5)); @@ -431,7 +429,7 @@ public void onFailure(Exception e) { response = internalCluster().coordOnlyNodeClient().admin().cluster().preparePendingClusterTasks().get(); assertThat(response.pendingTasks().size(), greaterThanOrEqualTo(5)); controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5")); - for (PendingClusterTask task : response) { + for (PendingClusterTask task : response.pendingTasks()) { if (controlSources.remove(task.getSource().string())) { assertThat(task.getTimeInQueueInMillis(), greaterThan(0L)); } @@ -441,4 +439,28 @@ public void onFailure(Exception e) { block2.countDown(); } } + + private static void waitForTimeToElapse() throws InterruptedException { + final ThreadPool[] threadPools = StreamSupport.stream(internalCluster().getInstances(ClusterService.class).spliterator(), false) + .map(ClusterService::threadPool) + .toArray(ThreadPool[]::new); + final long[] startTimes = Arrays.stream(threadPools).mapToLong(ThreadPool::relativeTimeInMillis).toArray(); + + final var startNanoTime = System.nanoTime(); + while (TimeUnit.MILLISECONDS.convert(System.nanoTime() - startNanoTime, TimeUnit.NANOSECONDS) <= 100) { + // noinspection BusyWait + Thread.sleep(100); + } + + outer: do { + for (int i = 0; i < threadPools.length; i++) { + if (threadPools[i].relativeTimeInMillis() <= startTimes[i]) { + // noinspection BusyWait + Thread.sleep(100); + continue outer; + } + } + return; + } while (true); + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index 6f8f2eb11a80..4a823ed997ea 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -339,7 +339,7 @@ public void testSendingShardFailure() throws Exception { // fail a random shard ShardRouting failedShard = randomFrom( - clusterService().state().getRoutingNodes().node(nonMasterNodeId).shardsWithState(ShardRoutingState.STARTED) + clusterService().state().getRoutingNodes().node(nonMasterNodeId).shardsWithState(ShardRoutingState.STARTED).toList() ); ShardStateAction service = internalCluster().getInstance(ShardStateAction.class, nonMasterNode); CountDownLatch latch = new CountDownLatch(1); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java index e2866d669e06..d4a9467818fd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java @@ -16,6 +16,7 @@ import org.elasticsearch.cluster.coordination.NoMasterBlockService; import org.elasticsearch.cluster.metadata.IndexMetadata; import 
org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.disruption.BlockMasterServiceOnMaster; @@ -142,7 +143,7 @@ public void testIsolateMasterAndVerifyClusterStateConsensus() throws Exception { assertEquals("different meta data version", state.metadata().version(), nodeState.metadata().version()); assertEquals("different routing", state.routingTable().toString(), nodeState.routingTable().toString()); } catch (AssertionError t) { - fail(formatted(""" + fail(Strings.format(""" failed comparing cluster state: %s --- cluster state of node [%s]: --- %s @@ -202,7 +203,7 @@ public void testVerifyApiBlocksDuringPartition() throws Exception { success = false; } if (success == false) { - fail(formatted(""" + fail(Strings.format(""" node [%s] has no master or has blocks, despite of being on the right side of the partition. State dump: %s""", node, nodeState)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java index cf8c44119b22..941400f7ce48 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java @@ -135,7 +135,8 @@ private void assertGreenMasterStability(Client client) throws Exception { private void assertMasterStability(Client client, HealthStatus expectedStatus, Matcher expectedMatcher) throws Exception { assertBusy(() -> { - GetHealthAction.Response healthResponse = client.execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(true)).get(); + GetHealthAction.Response healthResponse = client.execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(true, 1000)) + .get(); String debugInformation = xContentToString(healthResponse); assertThat(debugInformation, healthResponse.findIndicator("master_is_stable").status(), equalTo(expectedStatus)); assertThat(debugInformation, healthResponse.findIndicator("master_is_stable").symptom(), expectedMatcher); @@ -144,7 +145,7 @@ private void assertMasterStability(Client client, HealthStatus expectedStatus, M private String xContentToString(ChunkedToXContent xContent) throws IOException { XContentBuilder builder = JsonXContent.contentBuilder(); - xContent.toXContentChunked().forEachRemaining(xcontent -> { + xContent.toXContentChunked(ToXContent.EMPTY_PARAMS).forEachRemaining(xcontent -> { try { xcontent.toXContent(builder, ToXContent.EMPTY_PARAMS); } catch (IOException e) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java index 155a40d6917b..ba086cb4e978 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java @@ -9,13 +9,10 @@ package org.elasticsearch.discovery.single; import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LogEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.coordination.JoinHelper; import 
org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESIntegTestCase; @@ -105,7 +102,6 @@ public Path nodeConfigPath(int nodeOrdinal) { public void testCannotJoinNodeWithSingleNodeDiscovery() throws Exception { MockLogAppender mockAppender = new MockLogAppender(); - mockAppender.start(); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation("test", JoinHelper.class.getCanonicalName(), Level.INFO, "failed to join") { @@ -158,20 +154,13 @@ public Path nodeConfigPath(int nodeOrdinal) { "other", Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class), Function.identity() - ) + ); + var ignored = mockAppender.capturing(JoinHelper.class) ) { - - Logger clusterLogger = LogManager.getLogger(JoinHelper.class); - Loggers.addAppender(clusterLogger, mockAppender); - try { - other.beforeTest(random()); - final ClusterState first = internalCluster().getInstance(ClusterService.class).state(); - assertThat(first.nodes().getSize(), equalTo(1)); - assertBusy(() -> mockAppender.assertAllExpectationsMatched()); - } finally { - Loggers.removeAppender(clusterLogger, mockAppender); - mockAppender.stop(); - } + other.beforeTest(random()); + final ClusterState first = internalCluster().getInstance(ClusterService.class).state(); + assertThat(first.nodes().getSize(), equalTo(1)); + assertBusy(mockAppender::assertAllExpectationsMatched); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java index b04425f75df0..459b5cfeb491 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java @@ -244,7 +244,6 @@ public void testRecentPrimaryInformation() throws Exception { transportServiceOnPrimary.clearAllRules(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/91613") public void testFullClusterRestartPerformNoopRecovery() throws Exception { int numOfReplicas = randomIntBetween(1, 2); internalCluster().ensureAtLeastNumDataNodes(numOfReplicas + 2); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java index cdb09dc31054..1008eabd6cc9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java @@ -225,7 +225,6 @@ public void testPreferCopyCanPerformNoopRecovery() throws Exception { transportServiceOnPrimary.clearAllRules(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/91451") public void testFullClusterRestartPerformNoopRecovery() throws Exception { int numOfReplicas = randomIntBetween(1, 2); internalCluster().ensureAtLeastNumDataNodes(numOfReplicas + 2); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java index a49d38f5c2ea..c486cd5cfe7f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java @@ -789,7 +789,7 @@ public void testGeneratedStringFieldsStored() throws IOException { void indexSingleDocumentWithStringFieldsGeneratedFromText(boolean stored, boolean sourceEnabled) { String storedString = stored ? "true" : "false"; - String createIndexSource = formatted(""" + String createIndexSource = Strings.format(""" { "settings": { "index.translog.flush_threshold_size": "1pb", diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthActionIT.java index 858c783b0cb1..8d4789da7d1d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthActionIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.metrics.Counters; @@ -35,11 +35,9 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.NoSuchElementException; -import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.function.Supplier; import java.util.stream.Stream; @@ -109,7 +107,7 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationDeciders allocationDeciders + AllocationService allocationService ) { healthIndicatorServices.add(new IlmHealthIndicatorService(clusterService)); healthIndicatorServices.add(new SlmHealthIndicatorService(clusterService)); @@ -144,7 +142,7 @@ public String name() { } @Override - public HealthIndicatorResult calculate(boolean verbose, HealthInfo healthInfo) { + public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo) { var status = clusterService.getClusterSettings().get(statusSetting); return createIndicator( status, @@ -203,8 +201,10 @@ public void testGetHealth() throws Exception { { ExecutionException exception = expectThrows( ExecutionException.class, - () -> client.execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(NONEXISTENT_INDICATOR_NAME, randomBoolean())) - .get() + () -> client.execute( + GetHealthAction.INSTANCE, + new GetHealthAction.Request(NONEXISTENT_INDICATOR_NAME, randomBoolean(), 1000) + ).get() ); assertThat(exception.getCause(), instanceOf(ResourceNotFoundException.class)); } @@ -232,13 +232,6 @@ public void testGetHealth() throws Exception { } else { expectThrows(IllegalArgumentException.class, () -> stats.get(label)); } - Set expectedStatuses = new HashSet<>(); - expectedStatuses.add(ilmIndicatorStatus); - expectedStatuses.add(mostSevereHealthStatus); - assertThat(response.getStatuses(), equalTo(expectedStatuses)); - if (mostSevereHealthStatus != HealthStatus.GREEN || ilmIndicatorStatus != HealthStatus.GREEN) { - assertThat(response.getIndicators().isEmpty(), equalTo(mostSevereHealthStatus == HealthStatus.GREEN)); - } } } 
finally { @@ -258,7 +251,7 @@ private void testRootLevel( HealthStatus clusterCoordinationIndicatorStatus, boolean verbose ) throws Exception { - var response = client.execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(verbose)).get(); + var response = client.execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(verbose, 1000)).get(); assertThat( response.getStatus(), @@ -294,7 +287,7 @@ private void testRootLevel( } private void testIndicator(Client client, HealthStatus ilmIndicatorStatus, boolean verbose) throws Exception { - var response = client.execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(ILM_INDICATOR_NAME, verbose)).get(); + var response = client.execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(ILM_INDICATOR_NAME, verbose, 1000)).get(); assertNull(response.getStatus()); assertThat(response.getClusterName(), equalTo(new ClusterName(cluster().getClusterName()))); assertThat( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java index bc59dd27dc70..e987a6dafe6b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; @@ -92,7 +92,7 @@ public void onFailure(Exception e) { throw new RuntimeException(e); } }; - healthService.getHealth(internalCluster.client(node), TestHealthIndicatorService.NAME, true, listener); + healthService.getHealth(internalCluster.client(node), TestHealthIndicatorService.NAME, true, 1000, listener); assertBusy(() -> assertThat(onResponseCalled.get(), equalTo(true))); } } @@ -136,7 +136,7 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationDeciders allocationDeciders + AllocationService allocationService ) { healthIndicatorServices.add(new TestHealthIndicatorService()); return new ArrayList<>(healthIndicatorServices); @@ -158,7 +158,7 @@ public String name() { } @Override - public HealthIndicatorResult calculate(boolean verbose, HealthInfo healthInfo) { + public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo) { assertThat(healthInfo.diskInfoByNode().size(), equalTo(internalCluster().getNodeNames().length)); for (DiskHealthInfo diskHealthInfo : healthInfo.diskInfoByNode().values()) { assertThat(diskHealthInfo.healthStatus(), equalTo(HealthStatus.GREEN)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceIT.java index 9741f5c791ab..1cab207fda30 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/health/node/DiskHealthIndicatorServiceIT.java @@ -81,7 +81,7 @@ public void onFailure(Exception e) { throw new RuntimeException(e); } }; - healthService.getHealth(internalCluster().client(node), DiskHealthIndicatorService.NAME, true, listener); + healthService.getHealth(internalCluster().client(node), DiskHealthIndicatorService.NAME, true, 1000, listener); assertBusy(() -> assertNotNull(resultListReference.get())); return resultListReference.get(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java index d3333ce11640..98ce3933f9d3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java @@ -20,7 +20,7 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -53,6 +53,7 @@ import java.util.function.Supplier; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasToString; @@ -89,7 +90,12 @@ public void testFinalPipelineCantChangeDestination() { IllegalStateException.class, () -> client().prepareIndex("index").setId("1").setSource(Map.of("field", "value")).get() ); - assertThat(e, hasToString(containsString("final pipeline [final_pipeline] can't change the target index"))); + assertThat( + e, + hasToString( + endsWith("final pipeline [final_pipeline] can't change the target index (from [index] to [target]) for document [1]") + ) + ); } public void testFinalPipelineOfOldDestinationIsNotInvoked() { @@ -350,7 +356,7 @@ public Collection createComponents( final IndexNameExpressionResolver expressionResolver, final Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationDeciders allocationDeciders + AllocationService allocationService ) { return List.of(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/SettingsListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/SettingsListenerIT.java index dae4cdf4d9aa..a142a03a180f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/SettingsListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/SettingsListenerIT.java @@ -9,7 +9,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -75,7 +75,7 @@ public Collection createComponents( IndexNameExpressionResolver expressionResolver, Supplier repositoriesServiceSupplier, Tracer tracer, - AllocationDeciders 
allocationDeciders + AllocationService allocationService ) { return Collections.singletonList(service); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java index 9e392a8c5fa2..3c001a575250 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.gateway.PersistedClusterStateService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.translog.Translog; @@ -61,6 +62,18 @@ protected Collection> nodePlugins() { return CollectionUtils.appendToCopy(super.nodePlugins(), TestEnginePlugin.class); } + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + // Document page size should not be too small, else we can fail to write the cluster state for small max doc values + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal, otherSettings)) + .put( + PersistedClusterStateService.DOCUMENT_PAGE_SIZE.getKey(), + PersistedClusterStateService.DOCUMENT_PAGE_SIZE.get(Settings.EMPTY) + ) + .build(); + } + @Before public void setMaxDocs() { maxDocs.set(randomIntBetween(10, 100)); // Do not set this too low as we can fail to write the cluster state diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java index 839ef9fe52a0..2ec1fff5a420 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -51,6 +51,7 @@ import org.elasticsearch.index.engine.NoOpEngine; import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.RetentionLeaseSyncer; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.translog.TestTranslog; @@ -660,7 +661,9 @@ public static final IndexShard newIndexShard( RetentionLeaseSyncer.EMPTY, cbs, IndexModule.DEFAULT_SNAPSHOT_COMMIT_SUPPLIER, - System::nanoTime + System::nanoTime, + null, + ReplicationTracker.DEFAULT_FACTORY ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java index e7b24d49008f..feb2356b31a4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java @@ -78,7 +78,9 @@ EngineConfig engineConfigWithLargerIndexingMemory(EngineConfig config) { config.getPrimaryTermSupplier(), config.getSnapshotCommitSupplier(), config.getLeafSorter(), - config.getRelativeTimeInNanosSupplier() + config.getRelativeTimeInNanosSupplier(), + config.getIndexCommitListener(), + config.isPromotableToPrimary() ); } diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java index 678efc090323..5b488b6873de 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java @@ -35,7 +35,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -272,7 +271,7 @@ private static void assertShardStatesMatch( try { assertBusy(waitPredicate, 1, TimeUnit.MINUTES); } catch (AssertionError ae) { - fail(String.format(Locale.ROOT, """ + fail(Strings.format(""" failed to observe expect shard states expected: [%d] shards with states: %s observed: diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateIT.java index e438f538151c..8649946308e8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateIT.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.core.Strings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ESIntegTestCase; @@ -28,7 +29,7 @@ public class ConcurrentDynamicTemplateIT extends ESIntegTestCase { // see #3544 public void testConcurrentDynamicMapping() throws Exception { final String fieldName = "field"; - final String mapping = formatted(""" + final String mapping = Strings.format(""" { "dynamic_templates": [ { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index c86e9db4650c..31544cc63725 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -798,7 +798,6 @@ private void validateIndexRecoveryState(RecoveryState.Index indexState) { assertThat(indexState.recoveredBytesPercent(), lessThanOrEqualTo(100.0f)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/91087") public void testTransientErrorsDuringRecoveryAreRetried() throws Exception { final String recoveryActionToBlock = randomFrom( PeerRecoveryTargetService.Actions.PREPARE_TRANSLOG, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java index c907324a691c..c45e5343df45 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java @@ -9,6 +9,7 @@ package org.elasticsearch.indices.state; import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import 
org.elasticsearch.action.admin.indices.close.TransportVerifyShardBeforeCloseAction; import org.elasticsearch.cluster.ClusterState; @@ -16,6 +17,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.core.Glob; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -24,6 +26,7 @@ import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.List; @@ -128,8 +131,7 @@ private Releasable interceptVerifyShardBeforeCloseActions(final String indexPatt TransportService.class, internalCluster().getMasterName() ); - - final CountDownLatch release = new CountDownLatch(1); + final ListenableFuture release = new ListenableFuture<>(); for (DiscoveryNode node : internalCluster().clusterService().state().getNodes()) { mockTransportService.addSendBehavior( internalCluster().getInstance(TransportService.class, node.getName()), @@ -140,21 +142,23 @@ private Releasable interceptVerifyShardBeforeCloseActions(final String indexPatt if (Glob.globMatch(indexPattern, index)) { logger.info("request {} intercepted for index {}", requestId, index); onIntercept.run(); - try { - release.await(); + release.addListener(ActionListener.wrap(() -> { logger.info("request {} released for index {}", requestId, index); - } catch (final InterruptedException e) { - throw new AssertionError(e); - } + try { + connection.sendRequest(requestId, action, request, options); + } catch (IOException e) { + throw new AssertionError(e); + } + })); + return; } } - } connection.sendRequest(requestId, action, request, options); } ); } - return Releasables.releaseOnce(release::countDown); + return Releasables.releaseOnce(() -> release.onResponse(null)); } private static void assertIndexIsBlocked(final String... indices) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestAsyncProcessorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestAsyncProcessorIT.java new file mode 100644 index 000000000000..eac5c092b7ef --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestAsyncProcessorIT.java @@ -0,0 +1,151 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.ingest; + +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.plugins.IngestPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.tracing.Tracer; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentType; + +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.function.BiConsumer; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; + +/** + * The purpose of this test is to verify that when a processor executes an operation asynchronously, the + * result is the same as if the same operation had happened synchronously. + * + * In this test, two test processors are defined that perform the same operation, but one of them executes + * asynchronously. The result of the operation should be the same, and the bulk responses should be returned + * in the same order in which the corresponding index requests were submitted. + */ +public class IngestAsyncProcessorIT extends ESSingleNodeTestCase { + + @Override + protected Collection<Class<? extends Plugin>> getPlugins() { + return List.of(TestPlugin.class); + } + + public void testAsyncProcessorImplementation() { + // A pipeline with 2 processors: the async test processor and the sync test processor.
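        // A brief illustrative aside (not part of this test): the asynchronous contract exercised below hands
        // its result to a (document, exception) callback. Under that contract a processor would surface a
        // failure by passing the exception instead of the document, along these lines (compute(id) is a
        // hypothetical helper):
        //
        //     threadPool.generic().execute(() -> {
        //         try {
        //             ingestDocument.setFieldValue("foo", compute(id));
        //             handler.accept(ingestDocument, null);   // success: hand back the document
        //         } catch (Exception e) {
        //             handler.accept(null, e);                // failure: the exception surfaces in the bulk item response
        //         }
        //     });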
+ BytesReference pipelineBody = new BytesArray("{\"processors\": [{\"test-async\": {}, \"test\": {}}]}"); + client().admin().cluster().putPipeline(new PutPipelineRequest("_id", pipelineBody, XContentType.JSON)).actionGet(); + + BulkRequest bulkRequest = new BulkRequest(); + int numDocs = randomIntBetween(8, 256); + for (int i = 0; i < numDocs; i++) { + bulkRequest.add(new IndexRequest("foobar").id(Integer.toString(i)).source("{}", XContentType.JSON).setPipeline("_id")); + } + BulkResponse bulkResponse = client().bulk(bulkRequest).actionGet(); + assertThat(bulkResponse.getItems().length, equalTo(numDocs)); + for (int i = 0; i < numDocs; i++) { + String id = Integer.toString(i); + assertThat(bulkResponse.getItems()[i].getId(), equalTo(id)); + GetResponse getResponse = client().get(new GetRequest("foobar", id)).actionGet(); + // The expected result of async test processor: + assertThat(getResponse.getSource().get("foo"), equalTo("bar-" + id)); + // The expected result of sync test processor: + assertThat(getResponse.getSource().get("bar"), equalTo("baz-" + id)); + } + } + + public static class TestPlugin extends Plugin implements IngestPlugin { + + private ThreadPool threadPool; + + @Override + public Collection createComponents( + Client client, + ClusterService clusterService, + ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, + ScriptService scriptService, + NamedXContentRegistry xContentRegistry, + Environment environment, + NodeEnvironment nodeEnvironment, + NamedWriteableRegistry namedWriteableRegistry, + IndexNameExpressionResolver expressionResolver, + Supplier repositoriesServiceSupplier, + Tracer tracer, + AllocationService allocationService + ) { + this.threadPool = threadPool; + return List.of(); + } + + @Override + public Map getProcessors(Processor.Parameters parameters) { + return Map.of("test-async", (factories, tag, description, config) -> new AbstractProcessor(tag, description) { + + @Override + public void execute(IngestDocument ingestDocument, BiConsumer handler) { + threadPool.generic().execute(() -> { + String id = (String) ingestDocument.getSourceAndMetadata().get("_id"); + if (usually()) { + try { + Thread.sleep(10); + } catch (InterruptedException e) { + // ignore + } + } + ingestDocument.setFieldValue("foo", "bar-" + id); + handler.accept(ingestDocument, null); + }); + } + + @Override + public String getType() { + return "test-async"; + } + + @Override + public boolean isAsync() { + return true; + } + + }, "test", (processorFactories, tag, description, config) -> new AbstractProcessor(tag, description) { + @Override + public IngestDocument execute(IngestDocument ingestDocument) throws Exception { + String id = (String) ingestDocument.getSourceAndMetadata().get("_id"); + ingestDocument.setFieldValue("bar", "baz-" + id); + return ingestDocument; + } + + @Override + public String getType() { + return "test"; + } + }); + } + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java index 1fc92aa4ae68..aac7cd2ef22a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java @@ -101,7 +101,7 @@ protected Collection> nodePlugins() { "ingest_pipelines": { "my_ingest_pipeline": { "description": "_description", - "processors": [ + "processors": { "foo" : { "field": "pipeline", @@ -204,11 
+204,11 @@ public void clusterChanged(ClusterChangedEvent event) { clusterService.removeListener(this); metadataVersion.set(event.state().metadata().version()); savedClusterState.countDown(); - assertEquals(ReservedStateErrorMetadata.ErrorKind.VALIDATION, reservedState.errorMetadata().errorKind()); + assertEquals(ReservedStateErrorMetadata.ErrorKind.PARSING, reservedState.errorMetadata().errorKind()); assertThat(reservedState.errorMetadata().errors(), allOf(notNullValue(), hasSize(1))); assertThat( reservedState.errorMetadata().errors().get(0), - containsString("org.elasticsearch.ElasticsearchParseException: No processor type exists with name [foo]") + containsString("org.elasticsearch.xcontent.XContentParseException: [17:16] [reserved_state_chunk] failed") ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java new file mode 100644 index 000000000000..fea613e671b9 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java @@ -0,0 +1,194 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.ingest; + +import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.script.MockScriptEngine; +import org.elasticsearch.script.MockScriptPlugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Function; + +import static org.hamcrest.Matchers.equalTo; + +@ESIntegTestCase.ClusterScope(numDataNodes = 0, numClientNodes = 0, scope = ESIntegTestCase.Scope.TEST) +public class IngestStatsNamesAndTypesIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return List.of(CustomIngestTestPlugin.class, CustomScriptPlugin.class); + } + + @Override + protected boolean ignoreExternalCluster() { + return true; + } + + @SuppressWarnings("unchecked") + public void testIngestStatsNamesAndTypes() throws IOException { + String pipeline1 = org.elasticsearch.core.Strings.format(""" + { + "processors": [ + { + "set": { + "tag": "set-a", + "field": "a", + "value": "1" + } + }, + { + "set": { + "tag": "set-b", + "field": "b", 
+ "value": "2", + "if": { + "lang": "%s", + "source": "false_script" + } + } + }, + { + "set": { + "tag": "set-c", + "field": "c", + "value": "3", + "ignore_failure": true + } + }, + { + "set": { + "tag": "set-d", + "field": "d", + "value": "4", + "if": { + "lang": "%s", + "source": "true_script" + }, + "ignore_failure": true + } + } + ] + } + """, MockScriptEngine.NAME, MockScriptEngine.NAME); + BytesReference pipeline1Reference = new BytesArray(pipeline1); + client().admin().cluster().putPipeline(new PutPipelineRequest("pipeline1", pipeline1Reference, XContentType.JSON)).actionGet(); + + // index a single document through the pipeline + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.add(new IndexRequest("index1").id("1").source("{}", XContentType.JSON).setPipeline("pipeline1")); + client().bulk(bulkRequest).actionGet(); + + { + NodesStatsResponse nodesStatsResponse = client().admin() + .cluster() + .nodesStats(new NodesStatsRequest().addMetric("ingest")) + .actionGet(); + assertThat(nodesStatsResponse.getNodes().size(), equalTo(1)); + + NodeStats stats = nodesStatsResponse.getNodes().get(0); + assertThat(stats.getIngestStats().getTotalStats().getIngestCount(), equalTo(1L)); + assertThat(stats.getIngestStats().getPipelineStats().size(), equalTo(1)); + + IngestStats.PipelineStat pipelineStat = stats.getIngestStats().getPipelineStats().get(0); + assertThat(pipelineStat.getPipelineId(), equalTo("pipeline1")); + assertThat(pipelineStat.getStats().getIngestCount(), equalTo(1L)); + + List processorStats = stats.getIngestStats().getProcessorStats().get("pipeline1"); + assertThat(processorStats.size(), equalTo(4)); + + IngestStats.ProcessorStat setA = processorStats.get(0); + assertThat(setA.getName(), equalTo("set:set-a")); + assertThat(setA.getType(), equalTo("set")); + assertThat(setA.getStats().getIngestCount(), equalTo(1L)); + + IngestStats.ProcessorStat setB = processorStats.get(1); + assertThat(setB.getName(), equalTo("set:set-b")); + assertThat(setB.getType(), equalTo("conditional")); + assertThat(setB.getStats().getIngestCount(), equalTo(0L)); // see false_script above + + IngestStats.ProcessorStat setC = processorStats.get(2); + assertThat(setC.getName(), equalTo("set:set-c")); + assertThat(setC.getType(), equalTo("set")); + assertThat(setC.getStats().getIngestCount(), equalTo(1L)); + + IngestStats.ProcessorStat setD = processorStats.get(3); + assertThat(setD.getName(), equalTo("compound:CompoundProcessor-set-d")); + assertThat(setD.getType(), equalTo("conditional")); + assertThat(setD.getStats().getIngestCount(), equalTo(1L)); + } + + { + // the bits that we want to read from the cluster stats response aren't visible in java code (no getters, + // non-public classes and methods), roundtrip through json so that we can read what we want + ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get(); + XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); + builder.startObject(); + response.toXContent(builder, new ToXContent.MapParams(Map.of())); + builder.endObject(); + Map stats = createParser(JsonXContent.jsonXContent, Strings.toString(builder)).map(); + + int setProcessorCount = path(stats, "nodes.ingest.processor_stats.set.count"); + assertThat(setProcessorCount, equalTo(2)); + int conditionalProcessorCount = path(stats, "nodes.ingest.processor_stats.conditional.count"); + assertThat(conditionalProcessorCount, equalTo(1)); + } + } + + @SuppressWarnings("unchecked") + private static T path(Map map, String path) { + String[] paths 
= path.split("\\."); + String[] leading = Arrays.copyOfRange(paths, 0, paths.length - 1); + String trailing = paths[paths.length - 1]; + for (String key : leading) { + map = (Map) map.get(key); + } + return (T) map.get(trailing); + } + + public static class CustomIngestTestPlugin extends IngestTestPlugin { + @Override + public Map getProcessors(Processor.Parameters parameters) { + Map processors = new HashMap<>(); + processors.put("set", (factories, tag, description, config) -> { + String field = (String) config.remove("field"); + String value = (String) config.remove("value"); + return new FakeProcessor("set", tag, description, (ingestDocument) -> ingestDocument.setFieldValue(field, value)); + }); + + return processors; + } + } + + public static class CustomScriptPlugin extends MockScriptPlugin { + @Override + protected Map, Object>> pluginScripts() { + return Map.of("true_script", ctx -> true, "false_script", ctx -> false); + } + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTaskInitializationFailureIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTaskInitializationFailureIT.java index aa1a0d1fefd4..2002c36a0f86 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTaskInitializationFailureIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTaskInitializationFailureIT.java @@ -8,7 +8,7 @@ package org.elasticsearch.persistent; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -115,8 +115,8 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.CURRENT; } @Override diff --git a/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java index 857ff11d5422..f40208c359ff 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/plugins/IndexFoldersDeletionListenerIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; import java.nio.file.Files; import java.nio.file.Path; @@ -127,6 +128,14 @@ public void testListenersInvokedWhenIndexIsDeleted() throws Exception { }, 30L, TimeUnit.SECONDS); } + @TestLogging( + reason = "Debug #93226", + value = "org.elasticsearch.indices.cluster.IndicesClusterStateService:DEBUG," + + "org.elasticsearch.indices.IndicesService:DEBUG," + + "org.elasticsearch.index.IndexService:DEBUG," + + "org.elasticsearch.env.NodeEnvironment:DEBUG," + + "org.elasticsearch.cluster.service.MasterService:TRACE" + ) public void testListenersInvokedWhenIndexIsRelocated() throws Exception { final String masterNode = internalCluster().startMasterOnlyNode(); internalCluster().startDataOnlyNodes(4); @@ -165,6 +174,7 @@ public void testListenersInvokedWhenIndexIsRelocated() throws Exception { } final List excludedNodes = randomSubsetOf(2, shardsByNodes.keySet()); 
+ logger.info("--> excluding nodes {}", excludedNodes); assertAcked( client().admin() .indices() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java index b25b59355b6f..2bb11d678e57 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java @@ -7,25 +7,77 @@ */ package org.elasticsearch.readiness; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.ReservedStateErrorMetadata; +import org.elasticsearch.cluster.metadata.ReservedStateHandlerMetadata; +import org.elasticsearch.cluster.metadata.ReservedStateMetadata; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; +import org.elasticsearch.core.Tuple; import org.elasticsearch.discovery.MasterNotDiscoveredException; +import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; +import org.elasticsearch.reservedstate.service.FileSettingsService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.readiness.ReadinessClientProbe; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; import java.util.List; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import static org.elasticsearch.node.Node.INITIAL_STATE_TIMEOUT_SETTING; import static org.elasticsearch.test.NodeRoles.dataOnlyNode; import static org.elasticsearch.test.NodeRoles.masterNode; import static org.elasticsearch.test.NodeRoles.nonDataNode; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.notNullValue; @ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) public class ReadinessClusterIT extends ESIntegTestCase implements ReadinessClientProbe { + private static AtomicLong versionCounter = new AtomicLong(1); + + private static String testErrorJSON = """ + { + "metadata": { + "version": "%s", + "compatibility": "8.4.0" + }, + "state": { + "not_cluster_settings": { + "search.allow_expensive_queries": "false" + } + } + }"""; + + private static String testJSON = """ + { + "metadata": { + "version": "%s", + "compatibility": "8.4.0" + }, + "state": { + "cluster_settings": { + "indices.recovery.max_bytes_per_sec": "50mb" + } + } + }"""; + @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { Settings.Builder settings = Settings.builder() @@ -152,4 +204,148 @@ public Settings onNodeStopped(String nodeName) throws Exception { tcpReadinessProbeTrue(s); } } + + private Tuple setupClusterStateListenerForError(String node) { + ClusterService clusterService = 
internalCluster().clusterService(node); + CountDownLatch savedClusterState = new CountDownLatch(1); + AtomicLong metadataVersion = new AtomicLong(-1); + clusterService.addListener(new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); + if (reservedState != null && reservedState.errorMetadata() != null) { + assertEquals(ReservedStateErrorMetadata.ErrorKind.PARSING, reservedState.errorMetadata().errorKind()); + assertThat(reservedState.errorMetadata().errors(), allOf(notNullValue(), hasSize(1))); + assertThat( + reservedState.errorMetadata().errors().get(0), + containsString("Missing handler definition for content key [not_cluster_settings]") + ); + clusterService.removeListener(this); + metadataVersion.set(event.state().metadata().version()); + savedClusterState.countDown(); + } + } + }); + + return new Tuple<>(savedClusterState, metadataVersion); + } + + private void writeJSONFile(String node, String json) throws Exception { + long version = versionCounter.incrementAndGet(); + + FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, node); + + Files.createDirectories(fileSettingsService.operatorSettingsDir()); + Path tempFilePath = createTempFile(); + + Files.write(tempFilePath, Strings.format(json, version).getBytes(StandardCharsets.UTF_8)); + Files.move(tempFilePath, fileSettingsService.operatorSettingsFile(), StandardCopyOption.ATOMIC_MOVE); + logger.info("--> New file settings: [{}]", Strings.format(json, version)); + } + + public void testNotReadyOnBadFileSettings() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + logger.info("--> start data node / non master node"); + String dataNode = internalCluster().startNode(Settings.builder().put(dataOnlyNode()).put("discovery.initial_state_timeout", "1s")); + FileSettingsService dataFileSettingsService = internalCluster().getInstance(FileSettingsService.class, dataNode); + + assertFalse(dataFileSettingsService.watching()); + + logger.info("--> write bad file settings before we boot master node"); + writeJSONFile(dataNode, testErrorJSON); + + logger.info("--> start master node"); + final String masterNode = internalCluster().startMasterOnlyNode( + Settings.builder().put(INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s").build() + ); + assertMasterNode(internalCluster().nonMasterClient(), masterNode); + var savedClusterState = setupClusterStateListenerForError(masterNode); + + // we need this after we setup the listener above, in case the node started and processed + // settings before we set our listener to cluster state changes. 
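        // The "poke" below works because an update task that returns ClusterState.builder(currentState).build()
        // produces a new ClusterState instance, so a fresh ClusterChangedEvent is delivered to the listener we
        // just registered even though the state content is unchanged; see the causeClusterStateUpdate() helper
        // at the end of this class.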
+ causeClusterStateUpdate(); + + FileSettingsService masterFileSettingsService = internalCluster().getInstance(FileSettingsService.class, masterNode); + + assertTrue(masterFileSettingsService.watching()); + assertFalse(dataFileSettingsService.watching()); + + boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); + assertTrue(awaitSuccessful); + + ReadinessService s = internalCluster().getInstance(ReadinessService.class, internalCluster().getMasterName()); + assertNull(s.boundAddress()); + } + + private Tuple setupClusterStateListener(String node) { + ClusterService clusterService = internalCluster().clusterService(node); + CountDownLatch savedClusterState = new CountDownLatch(1); + AtomicLong metadataVersion = new AtomicLong(-1); + clusterService.addListener(new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); + if (reservedState != null) { + ReservedStateHandlerMetadata handlerMetadata = reservedState.handlers().get(ReservedClusterSettingsAction.NAME); + if (handlerMetadata != null && handlerMetadata.keys().contains("indices.recovery.max_bytes_per_sec")) { + clusterService.removeListener(this); + metadataVersion.set(event.state().metadata().version()); + savedClusterState.countDown(); + } + } + } + }); + + return new Tuple<>(savedClusterState, metadataVersion); + } + + public void testReadyAfterCorrectFileSettings() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + logger.info("--> start data node / non master node"); + String dataNode = internalCluster().startNode(Settings.builder().put(dataOnlyNode()).put("discovery.initial_state_timeout", "1s")); + FileSettingsService dataFileSettingsService = internalCluster().getInstance(FileSettingsService.class, dataNode); + + assertFalse(dataFileSettingsService.watching()); + var savedClusterState = setupClusterStateListener(dataNode); + + logger.info("--> write correct file settings before we boot master node"); + writeJSONFile(dataNode, testJSON); + + logger.info("--> start master node"); + final String masterNode = internalCluster().startMasterOnlyNode(); + assertMasterNode(internalCluster().nonMasterClient(), masterNode); + + FileSettingsService masterFileSettingsService = internalCluster().getInstance(FileSettingsService.class, masterNode); + + assertTrue(masterFileSettingsService.watching()); + assertFalse(dataFileSettingsService.watching()); + + boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); + assertTrue(awaitSuccessful); + + ReadinessService s = internalCluster().getInstance(ReadinessService.class, internalCluster().getMasterName()); + tcpReadinessProbeTrue(s); + } + + private void causeClusterStateUpdate() { + PlainActionFuture.get( + fut -> internalCluster().getCurrentMasterNodeInstance(ClusterService.class) + .submitUnbatchedStateUpdateTask("poke", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return ClusterState.builder(currentState).build(); + } + + @Override + public void onFailure(Exception e) { + assert false : e; + } + + @Override + public void clusterStateProcessed(ClusterState initialState, ClusterState newState) { + fut.onResponse(null); + } + }) + ); + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java index 69f88b1bc886..9f7c53d49407 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.core.Strings; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.repositories.fs.FsRepository; @@ -31,7 +32,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Optional; @@ -59,7 +59,7 @@ public void testGetShardSnapshotFromUnknownRepoReturnsAnError() throws Exception assertThat(repositoryException, is(notNullValue())); assertThat( repositoryException.getMessage(), - equalTo(formatted("[%s] Unable to find the latest snapshot for shard [[idx][0]]", repository)) + equalTo(Strings.format("[%s] Unable to find the latest snapshot for shard [[idx][0]]", repository)) ); } } else { @@ -133,7 +133,7 @@ public void testGetShardSnapshotReturnsTheLatestSuccessfulSnapshot() throws Exce indexRandomDocs(indexName2, 10); } final List snapshotIndices = randomSubsetOf(indices); - final SnapshotInfo snapshotInfo = createSnapshot(repoName, formatted("snap-%03d", i), snapshotIndices); + final SnapshotInfo snapshotInfo = createSnapshot(repoName, Strings.format("snap-%03d", i), snapshotIndices); if (snapshotInfo.indices().contains(indexName)) { lastSnapshot = snapshotInfo; ClusterStateResponse clusterStateResponse = admin().cluster().prepareState().execute().actionGet(); @@ -206,10 +206,12 @@ public void testGetShardSnapshotFailureHandlingLetOtherRepositoriesRequestsMakeP createIndexWithContent(indexName); int snapshotIdx = 0; - createSnapshot(failingRepoName, formatted("snap-%03d", snapshotIdx++), Collections.singletonList(indexName)); + Object[] args1 = new Object[] { snapshotIdx++ }; + createSnapshot(failingRepoName, Strings.format("snap-%03d", args1), Collections.singletonList(indexName)); SnapshotInfo latestSnapshot = null; for (String workingRepoName : workingRepoNames) { - String snapshot = formatted("snap-%03d", snapshotIdx++); + Object[] args = new Object[] { snapshotIdx++ }; + String snapshot = Strings.format("snap-%03d", args); latestSnapshot = createSnapshot(workingRepoName, snapshot, Collections.singletonList(indexName)); } @@ -234,7 +236,7 @@ public void testGetShardSnapshotFailureHandlingLetOtherRepositoriesRequestsMakeP assertThat(error.isPresent(), is(equalTo(true))); assertThat( error.get().getMessage(), - equalTo(String.format(Locale.ROOT, "[%s] Unable to find the latest snapshot for shard [[%s][0]]", failingRepoName, indexName)) + equalTo(Strings.format("[%s] Unable to find the latest snapshot for shard [[%s][0]]", failingRepoName, indexName)) ); for (String workingRepoName : workingRepoNames) { @@ -264,7 +266,8 @@ public void testGetShardSnapshotInMultipleRepositoriesReturnsTheLatestSnapshot() int snapshotIdx = 0; SnapshotInfo expectedLatestSnapshot = null; for (String repository : repositories) { - String snapshot = formatted("snap-%03d", snapshotIdx++); + Object[] args = new Object[] { snapshotIdx++ }; + String snapshot = Strings.format("snap-%03d", args); expectedLatestSnapshot = 
createSnapshot(repository, snapshot, Collections.singletonList(indexName)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java index 8954bcd542cf..c7b2387b2fa6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java @@ -8,6 +8,8 @@ package org.elasticsearch.reservedstate.service; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.template.get.GetComponentTemplateAction; import org.elasticsearch.action.admin.indices.template.get.GetComposableIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction; @@ -33,6 +35,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -399,13 +402,15 @@ private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLo boolean awaitSuccessful = savedClusterState.await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); - final var response = client().execute( - GetComposableIndexTemplateAction.INSTANCE, - new GetComposableIndexTemplateAction.Request("template*") - ).get(); + final ClusterStateResponse clusterStateResponse = client().admin() + .cluster() + .state(new ClusterStateRequest().waitForMetadataVersion(metadataVersion.get())) + .actionGet(); + + Map allTemplates = clusterStateResponse.getState().metadata().templatesV2(); assertThat( - response.indexTemplates().keySet().stream().collect(Collectors.toSet()), + allTemplates.keySet().stream().collect(Collectors.toSet()), containsInAnyOrder("template_1", "template_2", "template_other") ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java index cfde48d088db..10a14d027f9f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/FileSettingsServiceIT.java @@ -34,7 +34,9 @@ import java.util.concurrent.atomic.AtomicLong; import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING; +import static org.elasticsearch.node.Node.INITIAL_STATE_TIMEOUT_SETTING; import static org.elasticsearch.test.NodeRoles.dataOnlyNode; +import static org.elasticsearch.test.NodeRoles.masterNode; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -60,6 +62,30 @@ public class FileSettingsServiceIT extends ESIntegTestCase { } }"""; + private static String testJSON43mb = """ + { + "metadata": { + "version": "%s", + "compatibility": "8.4.0" + }, + "state": { + "cluster_settings": { + "indices.recovery.max_bytes_per_sec": "43mb" + } + } + }"""; + + private static String testCleanupJSON = """ + { + "metadata": { + 
"version": "%s", + "compatibility": "8.4.0" + }, + "state": { + "cluster_settings": {} + } + }"""; + private static String testErrorJSON = """ { "metadata": { @@ -90,6 +116,29 @@ private void writeJSONFile(String node, String json) throws Exception { Files.write(tempFilePath, Strings.format(json, version).getBytes(StandardCharsets.UTF_8)); Files.move(tempFilePath, fileSettingsService.operatorSettingsFile(), StandardCopyOption.ATOMIC_MOVE); + logger.info("--> New file settings: [{}]", Strings.format(json, version)); + } + + private Tuple setupCleanupClusterStateListener(String node) { + ClusterService clusterService = internalCluster().clusterService(node); + CountDownLatch savedClusterState = new CountDownLatch(1); + AtomicLong metadataVersion = new AtomicLong(-1); + clusterService.addListener(new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); + if (reservedState != null) { + ReservedStateHandlerMetadata handlerMetadata = reservedState.handlers().get(ReservedClusterSettingsAction.NAME); + if (handlerMetadata != null && handlerMetadata.keys().contains("indices.recovery.max_bytes_per_sec") == false) { + clusterService.removeListener(this); + metadataVersion.set(event.state().metadata().version()); + savedClusterState.countDown(); + } + } + } + }); + + return new Tuple<>(savedClusterState, metadataVersion); } private Tuple setupClusterStateListener(String node) { @@ -114,7 +163,8 @@ public void clusterChanged(ClusterChangedEvent event) { return new Tuple<>(savedClusterState, metadataVersion); } - private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLong metadataVersion) throws Exception { + private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLong metadataVersion, String expectedBytesPerSec) + throws Exception { boolean awaitSuccessful = savedClusterState.await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); @@ -125,7 +175,7 @@ private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLo assertThat( clusterStateResponse.getState().metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), - equalTo("50mb") + equalTo(expectedBytesPerSec) ); ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings( @@ -158,7 +208,7 @@ public void testSettingsApplied() throws Exception { assertFalse(dataFileSettingsService.watching()); writeJSONFile(masterNode, testJSON); - assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2()); + assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2(), "50mb"); } public void testSettingsAppliedOnStart() throws Exception { @@ -183,7 +233,41 @@ public void testSettingsAppliedOnStart() throws Exception { assertTrue(masterFileSettingsService.watching()); assertFalse(dataFileSettingsService.watching()); - assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2()); + assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2(), "50mb"); + } + + public void testReservedStatePersistsOnRestart() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + logger.info("--> start master node"); + final String masterNode = internalCluster().startMasterOnlyNode( + Settings.builder().put(INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s").build() + ); + assertMasterNode(internalCluster().masterClient(), masterNode); + var 
savedClusterState = setupClusterStateListener(masterNode); + + FileSettingsService masterFileSettingsService = internalCluster().getInstance(FileSettingsService.class, masterNode); + + assertTrue(masterFileSettingsService.watching()); + + logger.info("--> write some settings"); + writeJSONFile(masterNode, testJSON); + assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2(), "50mb"); + + logger.info("--> restart master"); + internalCluster().restartNode(masterNode); + + final ClusterStateResponse clusterStateResponse = client().admin().cluster().state(new ClusterStateRequest()).actionGet(); + assertEquals( + 1, + clusterStateResponse.getState() + .metadata() + .reservedStateMetadata() + .get(FileSettingsService.NAMESPACE) + .handlers() + .get(ReservedClusterSettingsAction.NAME) + .keys() + .size() + ); } private Tuple setupClusterStateListenerForError(String node) { @@ -238,7 +322,9 @@ public void testErrorSaved() throws Exception { assertFalse(dataFileSettingsService.watching()); logger.info("--> start master node"); - final String masterNode = internalCluster().startMasterOnlyNode(); + final String masterNode = internalCluster().startMasterOnlyNode( + Settings.builder().put(INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s").build() + ); assertMasterNode(internalCluster().nonMasterClient(), masterNode); var savedClusterState = setupClusterStateListenerForError(masterNode); @@ -250,4 +336,48 @@ public void testErrorSaved() throws Exception { writeJSONFile(masterNode, testErrorJSON); assertClusterStateNotSaved(savedClusterState.v1(), savedClusterState.v2()); } + + public void testSettingsAppliedOnMasterReElection() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + logger.info("--> start master node"); + final String masterNode = internalCluster().startMasterOnlyNode(); + + logger.info("--> start master eligible nodes, 2 more for quorum"); + String masterNode1 = internalCluster().startNode(Settings.builder().put(masterNode()).put("discovery.initial_state_timeout", "1s")); + String masterNode2 = internalCluster().startNode(Settings.builder().put(masterNode()).put("discovery.initial_state_timeout", "1s")); + FileSettingsService master1FS = internalCluster().getInstance(FileSettingsService.class, masterNode1); + FileSettingsService master2FS = internalCluster().getInstance(FileSettingsService.class, masterNode2); + + assertFalse(master1FS.watching()); + assertFalse(master2FS.watching()); + + var savedClusterState = setupClusterStateListener(masterNode); + FileSettingsService masterFileSettingsService = internalCluster().getInstance(FileSettingsService.class, masterNode); + + assertTrue(masterFileSettingsService.watching()); + + writeJSONFile(masterNode, testJSON); + assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2(), "50mb"); + + internalCluster().stopCurrentMasterNode(); + ensureStableCluster(2); + + FileSettingsService masterFS = internalCluster().getCurrentMasterNodeInstance(FileSettingsService.class); + assertTrue(masterFS.watching()); + logger.info("--> start another master eligible node to form a quorum"); + internalCluster().startNode(Settings.builder().put(masterNode()).put("discovery.initial_state_timeout", "1s")); + ensureStableCluster(3); + + savedClusterState = setupCleanupClusterStateListener(internalCluster().getMasterName()); + writeJSONFile(internalCluster().getMasterName(), testCleanupJSON); + + boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); + assertTrue(awaitSuccessful); + + savedClusterState 
= setupClusterStateListener(internalCluster().getMasterName()); + writeJSONFile(internalCluster().getMasterName(), testJSON43mb); + + assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2(), "43mb"); + } + } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java index 81e28686ee95..577e9f275e03 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java @@ -49,7 +49,6 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) public class RepositoriesFileSettingsIT extends ESIntegTestCase { - private static AtomicLong versionCounter = new AtomicLong(1); private static String testJSON = """ diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnaphotsAndFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnaphotsAndFileSettingsIT.java deleted file mode 100644 index 360dd4a68415..000000000000 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnaphotsAndFileSettingsIT.java +++ /dev/null @@ -1,335 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.reservedstate.service; - -import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsAction; -import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.InternalClusterInfoService; -import org.elasticsearch.cluster.metadata.ReservedStateHandlerMetadata; -import org.elasticsearch.cluster.metadata.ReservedStateMetadata; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.Strings; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; -import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; -import org.elasticsearch.snapshots.SnapshotState; -import org.junit.After; - -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.equalTo; - -/** - * Tests that snapshot restore behaves correctly when we have file based settings that reserve part of the - * cluster state - */ -public class SnaphotsAndFileSettingsIT extends AbstractSnapshotIntegTestCase { - private static AtomicLong versionCounter = new AtomicLong(1); - - private static String testFileSettingsJSON = """ - { - "metadata": { - "version": "%s", - "compatibility": "8.4.0" - }, - "state": { - "cluster_settings": { - "indices.recovery.max_bytes_per_sec": "50mb" - } - } - }"""; - - private static String emptyFileSettingsJSON = """ - { - "metadata": { - "version": "%s", - "compatibility": "8.4.0" - }, - "state": { - "cluster_settings": {} - } - }"""; - - @After - public void cleanUp() throws Exception { - awaitNoMoreRunningOperations(); - } - - private void writeJSONFile(String node, String json) throws Exception { - long version = versionCounter.incrementAndGet(); - - FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, node); - - Files.createDirectories(fileSettingsService.operatorSettingsDir()); - Path tempFilePath = createTempFile(); - - Files.write(tempFilePath, Strings.format(json, version).getBytes(StandardCharsets.UTF_8)); - Files.move(tempFilePath, fileSettingsService.operatorSettingsFile(), StandardCopyOption.ATOMIC_MOVE); - } - - private Tuple setupClusterStateListener(String node) { - ClusterService clusterService = internalCluster().clusterService(node); - CountDownLatch savedClusterState = new CountDownLatch(1); - AtomicLong metadataVersion = new AtomicLong(-1); - clusterService.addListener(new ClusterStateListener() { - @Override - public void clusterChanged(ClusterChangedEvent event) { - ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); - if (reservedState != null && reservedState.version() != 0L) { - ReservedStateHandlerMetadata handlerMetadata = 
reservedState.handlers().get(ReservedClusterSettingsAction.NAME); - if (handlerMetadata != null && handlerMetadata.keys().contains("indices.recovery.max_bytes_per_sec")) { - clusterService.removeListener(this); - metadataVersion.set(event.state().metadata().version()); - savedClusterState.countDown(); - } - } - } - }); - - return new Tuple<>(savedClusterState, metadataVersion); - } - - private ClusterStateResponse assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLong metadataVersion) throws Exception { - boolean awaitSuccessful = savedClusterState.await(20, TimeUnit.SECONDS); - assertTrue(awaitSuccessful); - - return clusterAdmin().state(new ClusterStateRequest().waitForMetadataVersion(metadataVersion.get())).get(); - } - - public void testRestoreWithRemovedFileSettings() throws Exception { - createRepository("test-repo", "fs"); - - logger.info("--> set some persistent cluster settings"); - assertAcked( - clusterAdmin().prepareUpdateSettings() - .setPersistentSettings( - Settings.builder() - .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(25)) - .build() - ) - ); - - ensureGreen(); - - String masterNode = internalCluster().getMasterName(); - - var savedClusterState = setupClusterStateListener(masterNode); - FileSettingsService fs = internalCluster().getInstance(FileSettingsService.class, masterNode); - - logger.info("--> write some file based settings, putting some reserved state"); - writeJSONFile(masterNode, testFileSettingsJSON); - final ClusterStateResponse savedStateResponse = assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2()); - assertThat( - savedStateResponse.getState().metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), - equalTo("50mb") - ); - - logger.info("--> create full snapshot"); - createFullSnapshot("test-repo", "test-snap"); - assertThat(getSnapshot("test-repo", "test-snap").state(), equalTo(SnapshotState.SUCCESS)); - - assertAcked( - clusterAdmin().prepareUpdateSettings() - .setPersistentSettings( - Settings.builder() - .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(55)) - .build() - ) - ); - - logger.info("--> deleting operator file, no file based settings"); - Files.delete(fs.operatorSettingsFile()); - - logger.info("--> restore global state from the snapshot"); - clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).get(); - - ensureGreen(); - - final ClusterStateResponse clusterStateResponse = clusterAdmin().state(new ClusterStateRequest().metadata(true)).actionGet(); - - // We expect no reserved metadata state for file based settings, the operator file was deleted. - assertNull(clusterStateResponse.getState().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE)); - - final ClusterGetSettingsAction.Response getSettingsResponse = clusterAdmin().execute( - ClusterGetSettingsAction.INSTANCE, - new ClusterGetSettingsAction.Request() - ).actionGet(); - - assertThat( - getSettingsResponse.persistentSettings().get(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey()), - equalTo("25s") - ); - // We didn't remove the setting set by file settings, we simply removed the reserved (operator) section. 
- assertThat(getSettingsResponse.persistentSettings().get("indices.recovery.max_bytes_per_sec"), equalTo("50mb")); - // cleanup - assertAcked( - clusterAdmin().prepareUpdateSettings() - .setPersistentSettings( - Settings.builder() - .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), (String) null) - .put("indices.recovery.max_bytes_per_sec", (String) null) - .build() - ) - ); - } - - private Tuple removedReservedClusterStateListener(String node) { - ClusterService clusterService = internalCluster().clusterService(node); - CountDownLatch savedClusterState = new CountDownLatch(2); - AtomicLong metadataVersion = new AtomicLong(-1); - clusterService.addListener(new ClusterStateListener() { - @Override - public void clusterChanged(ClusterChangedEvent event) { - ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); - // we first wait for reserved state version to become 0, then we expect to see it non-zero - if (reservedState != null && reservedState.version() == 0L) { - // don't remove the state listener yet, we need it to see the version non-zero - metadataVersion.set(event.state().metadata().version()); - savedClusterState.countDown(); - } else if (reservedState != null && reservedState.version() != 0L && savedClusterState.getCount() < 2) { - clusterService.removeListener(this); - metadataVersion.set(event.state().metadata().version()); - savedClusterState.countDown(); - } - } - }); - - return new Tuple<>(savedClusterState, metadataVersion); - } - - private Tuple cleanedClusterStateListener(String node) { - ClusterService clusterService = internalCluster().clusterService(node); - CountDownLatch savedClusterState = new CountDownLatch(1); - AtomicLong metadataVersion = new AtomicLong(-1); - clusterService.addListener(new ClusterStateListener() { - @Override - public void clusterChanged(ClusterChangedEvent event) { - ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); - if (reservedState != null) { - ReservedStateHandlerMetadata handlerMetadata = reservedState.handlers().get(ReservedClusterSettingsAction.NAME); - if (handlerMetadata == null) { - fail("Should've found cluster settings in this metadata"); - } - if (handlerMetadata.keys().isEmpty()) { - clusterService.removeListener(this); - metadataVersion.set(event.state().metadata().version()); - savedClusterState.countDown(); - } - } - } - }); - - return new Tuple<>(savedClusterState, metadataVersion); - } - - public void testRestoreWithPersistedFileSettings() throws Exception { - createRepository("test-repo", "fs"); - - logger.info("--> set some persistent cluster settings"); - assertAcked( - clusterAdmin().prepareUpdateSettings() - .setPersistentSettings( - Settings.builder() - .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(25)) - .build() - ) - ); - - ensureGreen(); - - String masterNode = internalCluster().getMasterName(); - - var savedClusterState = setupClusterStateListener(masterNode); - FileSettingsService fs = internalCluster().getInstance(FileSettingsService.class, masterNode); - - logger.info("--> write some file based settings, putting some reserved state"); - writeJSONFile(masterNode, testFileSettingsJSON); - final ClusterStateResponse savedStateResponse = assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2()); - assertThat( - 
savedStateResponse.getState().metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), - equalTo("50mb") - ); - - logger.info("--> create full snapshot"); - createFullSnapshot("test-repo", "test-snap"); - assertThat(getSnapshot("test-repo", "test-snap").state(), equalTo(SnapshotState.SUCCESS)); - - assertAcked( - clusterAdmin().prepareUpdateSettings() - .setPersistentSettings( - Settings.builder() - .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(55)) - .build() - ) - ); - - logger.info("--> restore global state from the snapshot"); - var removedReservedState = removedReservedClusterStateListener(masterNode); - - clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).get(); - - ensureGreen(); - - // When the target cluster of a restore has an existing operator file, we don't un-reserve the reserved - // cluster state for file based settings, but instead we reset the version to 0 and 'touch' the operator file - // so that it gets re-processed. - logger.info("--> reserved state version will be reset to 0, because of snapshot restore"); - // double timeout, we restore snapshot then apply the file - assertTrue(removedReservedState.v1().await(40, TimeUnit.SECONDS)); - - logger.info("--> reserved state would be restored to non-zero version"); - - final ClusterStateResponse clusterStateResponse = clusterAdmin().state( - new ClusterStateRequest().metadata(true).waitForMetadataVersion(removedReservedState.v2().get()) - ).actionGet(); - - assertNotNull(clusterStateResponse.getState().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE)); - - final ClusterGetSettingsAction.Response getSettingsResponse = clusterAdmin().execute( - ClusterGetSettingsAction.INSTANCE, - new ClusterGetSettingsAction.Request() - ).actionGet(); - - assertThat( - getSettingsResponse.persistentSettings().get(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey()), - equalTo("25s") - ); - - // we need to remove the reserved state, so that clean-up can happen - var cleanupReservedState = cleanedClusterStateListener(masterNode); - - logger.info("--> clear the file based settings"); - writeJSONFile(masterNode, emptyFileSettingsJSON); - assertClusterStateSaveOK(cleanupReservedState.v1(), cleanupReservedState.v2()); - // cleanup - assertAcked( - clusterAdmin().prepareUpdateSettings() - .setPersistentSettings( - Settings.builder() - .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), (String) null) - .put("indices.recovery.max_bytes_per_sec", (String) null) - .build() - ) - ); - } - -} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java new file mode 100644 index 000000000000..82259968a98e --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java @@ -0,0 +1,355 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.reservedstate.service; + +import org.apache.lucene.tests.util.LuceneTestCase; +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.InternalClusterInfoService; +import org.elasticsearch.cluster.metadata.ReservedStateHandlerMetadata; +import org.elasticsearch.cluster.metadata.ReservedStateMetadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; +import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; +import org.elasticsearch.snapshots.SnapshotState; +import org.junit.After; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + +import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; + +/** + * Tests that snapshot restore behaves correctly when we have file based settings that reserve part of the + * cluster state + */ +@LuceneTestCase.SuppressFileSystems("*") +public class SnapshotsAndFileSettingsIT extends AbstractSnapshotIntegTestCase { + private static AtomicLong versionCounter = new AtomicLong(1); + + private static String testFileSettingsJSON = """ + { + "metadata": { + "version": "%s", + "compatibility": "8.4.0" + }, + "state": { + "cluster_settings": { + "indices.recovery.max_bytes_per_sec": "50mb" + } + } + }"""; + + private static String emptyFileSettingsJSON = """ + { + "metadata": { + "version": "%s", + "compatibility": "8.4.0" + }, + "state": { + "cluster_settings": {} + } + }"""; + + @After + public void cleanUp() throws Exception { + awaitNoMoreRunningOperations(); + } + + private long retryDelay(int retryCount) { + return 100 * (1 << retryCount) + Randomness.get().nextInt(10); + } + + private void writeJSONFile(String node, String json) throws Exception { + long version = versionCounter.incrementAndGet(); + + FileSettingsService fileSettingsService = internalCluster().getInstance(FileSettingsService.class, node); + + Files.createDirectories(fileSettingsService.operatorSettingsDir()); + Path tempFilePath = createTempFile(); + + Files.write(tempFilePath, Strings.format(json, version).getBytes(StandardCharsets.UTF_8)); + int retryCount = 0; + do { + try { + // this can fail on Windows because of timing + Files.move(tempFilePath, fileSettingsService.operatorSettingsFile(), StandardCopyOption.ATOMIC_MOVE); + return; + } catch (IOException e) { + logger.info("--> retrying writing a settings file [" + retryCount + "]"); + if (retryCount == 4) { // retry 5 times + throw e; + } + Thread.sleep(retryDelay(retryCount)); + retryCount++; + } + } while (true); + } + + private Tuple 
setupClusterStateListener(String node) { + ClusterService clusterService = internalCluster().clusterService(node); + CountDownLatch savedClusterState = new CountDownLatch(1); + AtomicLong metadataVersion = new AtomicLong(-1); + clusterService.addListener(new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); + if (reservedState != null && reservedState.version() != 0L) { + ReservedStateHandlerMetadata handlerMetadata = reservedState.handlers().get(ReservedClusterSettingsAction.NAME); + if (handlerMetadata != null && handlerMetadata.keys().contains("indices.recovery.max_bytes_per_sec")) { + clusterService.removeListener(this); + metadataVersion.set(event.state().metadata().version()); + savedClusterState.countDown(); + } + } + } + }); + + return new Tuple<>(savedClusterState, metadataVersion); + } + + private ClusterStateResponse assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLong metadataVersion) throws Exception { + boolean awaitSuccessful = savedClusterState.await(20, TimeUnit.SECONDS); + assertTrue(awaitSuccessful); + + return clusterAdmin().state(new ClusterStateRequest().waitForMetadataVersion(metadataVersion.get())).get(); + } + + public void testRestoreWithRemovedFileSettings() throws Exception { + createRepository("test-repo", "fs"); + + logger.info("--> set some persistent cluster settings"); + assertAcked( + clusterAdmin().prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(25)) + .build() + ) + ); + + ensureGreen(); + + String masterNode = internalCluster().getMasterName(); + + var savedClusterState = setupClusterStateListener(masterNode); + FileSettingsService fs = internalCluster().getInstance(FileSettingsService.class, masterNode); + + logger.info("--> write some file based settings, putting some reserved state"); + writeJSONFile(masterNode, testFileSettingsJSON); + final ClusterStateResponse savedStateResponse = assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2()); + assertThat( + savedStateResponse.getState().metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), + equalTo("50mb") + ); + + logger.info("--> create full snapshot"); + createFullSnapshot("test-repo", "test-snap"); + assertThat(getSnapshot("test-repo", "test-snap").state(), equalTo(SnapshotState.SUCCESS)); + + assertAcked( + clusterAdmin().prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(55)) + .build() + ) + ); + + logger.info("--> deleting operator file, no file based settings"); + Files.delete(fs.operatorSettingsFile()); + + logger.info("--> restore global state from the snapshot"); + clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).get(); + + ensureGreen(); + + final ClusterStateResponse clusterStateResponse = clusterAdmin().state(new ClusterStateRequest().metadata(true)).actionGet(); + + // We expect no reserved metadata state for file based settings, the operator file was deleted. 
+ assertNull(clusterStateResponse.getState().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE)); + + final ClusterGetSettingsAction.Response getSettingsResponse = clusterAdmin().execute( + ClusterGetSettingsAction.INSTANCE, + new ClusterGetSettingsAction.Request() + ).actionGet(); + + assertThat( + getSettingsResponse.persistentSettings().get(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey()), + equalTo("25s") + ); + // We didn't remove the setting set by file settings, we simply removed the reserved (operator) section. + assertThat(getSettingsResponse.persistentSettings().get("indices.recovery.max_bytes_per_sec"), equalTo("50mb")); + // cleanup + assertAcked( + clusterAdmin().prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), (String) null) + .put("indices.recovery.max_bytes_per_sec", (String) null) + .build() + ) + ); + } + + private Tuple removedReservedClusterStateListener(String node) { + ClusterService clusterService = internalCluster().clusterService(node); + CountDownLatch savedClusterState = new CountDownLatch(2); + AtomicLong metadataVersion = new AtomicLong(-1); + clusterService.addListener(new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); + // we first wait for reserved state version to become 0, then we expect to see it non-zero + if (reservedState != null && reservedState.version() == 0L) { + // don't remove the state listener yet, we need it to see the version non-zero + metadataVersion.set(event.state().metadata().version()); + savedClusterState.countDown(); + } else if (reservedState != null && reservedState.version() != 0L && savedClusterState.getCount() < 2) { + clusterService.removeListener(this); + metadataVersion.set(event.state().metadata().version()); + savedClusterState.countDown(); + } + } + }); + + return new Tuple<>(savedClusterState, metadataVersion); + } + + private Tuple cleanedClusterStateListener(String node) { + ClusterService clusterService = internalCluster().clusterService(node); + CountDownLatch savedClusterState = new CountDownLatch(1); + AtomicLong metadataVersion = new AtomicLong(-1); + clusterService.addListener(new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); + if (reservedState != null) { + ReservedStateHandlerMetadata handlerMetadata = reservedState.handlers().get(ReservedClusterSettingsAction.NAME); + if (handlerMetadata == null) { + fail("Should've found cluster settings in this metadata"); + } + if (handlerMetadata.keys().isEmpty()) { + clusterService.removeListener(this); + metadataVersion.set(event.state().metadata().version()); + savedClusterState.countDown(); + } + } + } + }); + + return new Tuple<>(savedClusterState, metadataVersion); + } + + public void testRestoreWithPersistedFileSettings() throws Exception { + createRepository("test-repo", "fs"); + + logger.info("--> set some persistent cluster settings"); + assertAcked( + clusterAdmin().prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(25)) + .build() + ) + ); + + 
ensureGreen(); + + String masterNode = internalCluster().getMasterName(); + + var savedClusterState = setupClusterStateListener(masterNode); + + logger.info("--> write some file based settings, putting some reserved state"); + writeJSONFile(masterNode, testFileSettingsJSON); + final ClusterStateResponse savedStateResponse = assertClusterStateSaveOK(savedClusterState.v1(), savedClusterState.v2()); + assertThat( + savedStateResponse.getState().metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), + equalTo("50mb") + ); + + logger.info("--> create full snapshot"); + createFullSnapshot("test-repo", "test-snap"); + assertThat(getSnapshot("test-repo", "test-snap").state(), equalTo(SnapshotState.SUCCESS)); + + assertAcked( + clusterAdmin().prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(55)) + .build() + ) + ); + + logger.info("--> restore global state from the snapshot"); + var removedReservedState = removedReservedClusterStateListener(masterNode); + + clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).get(); + + ensureGreen(); + + // When the target cluster of a restore has an existing operator file, we don't un-reserve the reserved + // cluster state for file based settings, but instead we reset the version to 0 and 'touch' the operator file + // so that it gets re-processed. + logger.info("--> reserved state version will be reset to 0, because of snapshot restore"); + // double timeout, we restore snapshot then apply the file + assertTrue(removedReservedState.v1().await(40, TimeUnit.SECONDS)); + + logger.info("--> reserved state would be restored to non-zero version"); + + final ClusterStateResponse clusterStateResponse = clusterAdmin().state( + new ClusterStateRequest().metadata(true).waitForMetadataVersion(removedReservedState.v2().get()) + ).actionGet(); + + assertNotNull(clusterStateResponse.getState().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE)); + + final ClusterGetSettingsAction.Response getSettingsResponse = clusterAdmin().execute( + ClusterGetSettingsAction.INSTANCE, + new ClusterGetSettingsAction.Request() + ).actionGet(); + + assertThat( + getSettingsResponse.persistentSettings().get(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey()), + equalTo("25s") + ); + + // we need to remove the reserved state, so that clean-up can happen + var cleanupReservedState = cleanedClusterStateListener(masterNode); + + logger.info("--> clear the file based settings"); + writeJSONFile(masterNode, emptyFileSettingsJSON); + assertClusterStateSaveOK(cleanupReservedState.v1(), cleanupReservedState.v2()); + // cleanup + assertAcked( + clusterAdmin().prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), (String) null) + .put("indices.recovery.max_bytes_per_sec", (String) null) + .build() + ) + ); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java index f9c245d20a34..0e3565b63d7f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java @@ -9,6 +9,7 @@ import 
org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentType; @@ -40,7 +41,7 @@ protected Collection> nodePlugins() { } public void testBasics() { - assertAcked(client().admin().cluster().preparePutStoredScript().setId("foobar").setContent(new BytesArray(formatted(""" + assertAcked(client().admin().cluster().preparePutStoredScript().setId("foobar").setContent(new BytesArray(Strings.format(""" {"script": {"lang": "%s", "source": "1"} } """, LANG)), XContentType.JSON)); String script = client().admin().cluster().prepareGetStoredScript("foobar").get().getSource().getSource(); @@ -53,9 +54,9 @@ public void testBasics() { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> client().admin().cluster().preparePutStoredScript().setId("id#").setContent(new BytesArray(formatted(""" + () -> { client().admin().cluster().preparePutStoredScript().setId("id#").setContent(new BytesArray(Strings.format(""" {"script": {"lang": "%s", "source": "1"} } - """, LANG)), XContentType.JSON).get() + """, LANG)), XContentType.JSON).get(); } ); assertEquals("Validation Failed: 1: id cannot contain '#' for stored script;", e.getMessage()); } @@ -63,9 +64,9 @@ public void testBasics() { public void testMaxScriptSize() { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> client().admin().cluster().preparePutStoredScript().setId("foobar").setContent(new BytesArray(formatted(""" + () -> { client().admin().cluster().preparePutStoredScript().setId("foobar").setContent(new BytesArray(Strings.format(""" {"script": { "lang": "%s", "source":"0123456789abcdef"} }\ - """, LANG)), XContentType.JSON).get() + """, LANG)), XContentType.JSON).get(); } ); assertEquals("exceeded max allowed stored script size in bytes [64] with size [65] for script [foobar]", e.getMessage()); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java index 95f85226c271..653a110f6116 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/MetadataIT.java @@ -15,7 +15,6 @@ import org.elasticsearch.search.aggregations.pipeline.InternalBucketMetricValue; import org.elasticsearch.test.ESIntegTestCase; -import java.util.HashMap; import java.util.List; import java.util.Map; @@ -24,7 +23,6 @@ import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.maxBucket; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; public class MetadataIT extends ESIntegTestCase { @@ -33,26 +31,13 @@ public void testMetadataSetOnAggregationResult() throws Exception { IndexRequestBuilder[] builders = new IndexRequestBuilder[randomInt(30)]; for (int i = 0; i < builders.length; i++) { String name = "name_" + randomIntBetween(1, 10); - builders[i] = client().prepareIndex("idx") - .setSource(jsonBuilder().startObject().field("name", name).field("value", randomInt()).endObject()); + builders[i] = client().prepareIndex("idx").setSource("name", name, 
"value", randomInt()); } indexRandom(true, builders); ensureSearchable(); - final Map nestedMetadata = new HashMap() { - { - put("nested", "value"); - } - }; - - Map metadata = new HashMap() { - { - put("key", "value"); - put("numeric", 1.2); - put("bool", true); - put("complex", nestedMetadata); - } - }; + final var nestedMetadata = Map.of("nested", "value"); + var metadata = Map.of("key", "value", "numeric", 1.2, "bool", true, "complex", nestedMetadata); SearchResponse response = client().prepareSearch("idx") .addAggregation( @@ -95,8 +80,7 @@ private void assertMetadata(Map returnedMetadata) { Object nestedObject = returnedMetadata.get("complex"); assertNotNull(nestedObject); - @SuppressWarnings("unchecked") - Map nestedMap = (Map) nestedObject; + Map nestedMap = (Map) nestedObject; assertEquals("value", nestedMap.get("nested")); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java index 6272b29c068d..b17e12190405 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; @@ -520,7 +521,7 @@ public void testPartiallyUnmappedWithFormat() throws Exception { assertThat(terms.getBuckets().size(), equalTo(5)); for (int i = 0; i < 5; i++) { - String key = formatted("%07.2f", (double) i); + String key = Strings.format("%07.2f", (double) i); DoubleTerms.Bucket bucket = terms.getBucketByKey(key); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(key)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java index 81f416a48802..122168737c96 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; @@ -510,7 +511,7 @@ public void testPartiallyUnmappedWithFormat() throws Exception { assertThat(terms.getBuckets().size(), equalTo(5)); for (int i = 0; i < 5; i++) { - String key = formatted("%04d", i); + String key = Strings.format("%04d", i); LongTerms.Bucket bucket = terms.getBucketByKey(key); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(key)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index c9009b848456..b03991c0af5e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -150,7 +150,8 @@ public void testXContentResponse() throws Exception { classes.toXContent(responseBuilder, ToXContent.EMPTY_PARAMS); responseBuilder.endObject(); - String result = formatted(""" + Object[] args = new Object[] { type.equals("long") ? "0" : "\"0\"", type.equals("long") ? "1" : "\"1\"" }; + String result = Strings.format(""" { "class": { "doc_count_error_upper_bound": 0, @@ -191,7 +192,7 @@ public void testXContentResponse() throws Exception { ] } } - """, type.equals("long") ? "0" : "\"0\"", type.equals("long") ? "1" : "\"1\""); + """, args); assertThat(Strings.toString(responseBuilder), equalTo(XContentHelper.stripWhitespace(result))); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java index a486f553d2bf..89df04d455cb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.logging.log4j.LogManager; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; @@ -49,6 +50,7 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.sameInstance; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/92822") public class HDRPercentilesIT extends AbstractNumericTestCase { @Override diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java index 11f020672bf0..6efda3f11d7f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.core.Strings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; @@ -303,23 +304,29 @@ public void setupSuiteScopeCluster() throws Exception { // When using the MockScriptPlugin we can map Stored scripts to inline scripts: // the id of the stored script is used in test method while the source of the stored script // must match a predefined script from CustomScriptPlugin.pluginScripts() method - assertAcked(client().admin().cluster().preparePutStoredScript().setId("initScript_stored").setContent(new BytesArray(formatted(""" - {"script": {"lang": "%s", "source": "vars.multiplier = 3"} } - """, 
MockScriptPlugin.NAME)), XContentType.JSON)); + assertAcked( + client().admin().cluster().preparePutStoredScript().setId("initScript_stored").setContent(new BytesArray(Strings.format(""" + {"script": {"lang": "%s", "source": "vars.multiplier = 3"} } + """, MockScriptPlugin.NAME)), XContentType.JSON) + ); - assertAcked(client().admin().cluster().preparePutStoredScript().setId("mapScript_stored").setContent(new BytesArray(formatted(""" - {"script": {"lang": "%s", "source": "state.list.add(vars.multiplier)"} } - """, MockScriptPlugin.NAME)), XContentType.JSON)); + assertAcked( + client().admin().cluster().preparePutStoredScript().setId("mapScript_stored").setContent(new BytesArray(Strings.format(""" + {"script": {"lang": "%s", "source": "state.list.add(vars.multiplier)"} } + """, MockScriptPlugin.NAME)), XContentType.JSON) + ); assertAcked( - client().admin().cluster().preparePutStoredScript().setId("combineScript_stored").setContent(new BytesArray(formatted(""" + client().admin().cluster().preparePutStoredScript().setId("combineScript_stored").setContent(new BytesArray(Strings.format(""" {"script": {"lang": "%s", "source": "sum state values as a new aggregation"} } """, MockScriptPlugin.NAME)), XContentType.JSON) ); - assertAcked(client().admin().cluster().preparePutStoredScript().setId("reduceScript_stored").setContent(new BytesArray(formatted(""" - {"script": {"lang": "%s", "source": "sum all states (lists) values as a new aggregation"} } - """, MockScriptPlugin.NAME)), XContentType.JSON)); + assertAcked( + client().admin().cluster().preparePutStoredScript().setId("reduceScript_stored").setContent(new BytesArray(Strings.format(""" + {"script": {"lang": "%s", "source": "sum all states (lists) values as a new aggregation"} } + """, MockScriptPlugin.NAME)), XContentType.JSON) + ); indexRandom(true, builders); ensureSearchable(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java index b17a5df319e6..33db3ac4103a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java @@ -29,6 +29,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.emptyArray; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; @@ -67,6 +68,9 @@ public void testClusterAllowPartialsWithRedState() throws Exception { assertThat("Expected total shards", searchResponse.getTotalShards(), equalTo(numShards)); for (ShardSearchFailure failure : searchResponse.getShardFailures()) { assertThat(failure.getCause(), instanceOf(NoShardAvailableActionException.class)); + assertThat(failure.getCause().getStackTrace(), emptyArray()); + // We don't write out the entire, repetitive stacktrace in the reason + assertThat(failure.reason(), equalTo("org.elasticsearch.action.NoShardAvailableActionException" + System.lineSeparator())); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java index 6656e0aa1f93..c492dfe60b6e 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -8,6 +8,9 @@ package org.elasticsearch.search.fieldcaps; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.fieldcaps.FieldCapabilities; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction; @@ -17,6 +20,10 @@ import org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesAction; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Cancellable; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; @@ -31,6 +38,7 @@ import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.SourceLoader; +import org.elasticsearch.index.mapper.StringStoredFieldFieldLoader; import org.elasticsearch.index.mapper.TimeSeriesParams; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -43,14 +51,18 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.DummyQueryBuilder; +import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; import org.junit.Before; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -58,6 +70,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import java.util.function.Function; @@ -65,12 +80,14 @@ import java.util.stream.IntStream; import static java.util.Collections.singletonList; +import static org.elasticsearch.action.support.ActionTestUtils.wrapAsRestResponseListener; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.array; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.not; @@ -103,6 +120,7 @@ public void setUp() throws Exception { .endObject() .startObject("playlist") .field("type", "text") + .field("store", true) .endObject() 
.startObject("some_dimension") .field("type", "keyword") @@ -110,7 +128,7 @@ public void setUp() throws Exception { .endObject() .startObject("some_metric") .field("type", "long") - .field("time_series_metric", TimeSeriesParams.MetricType.counter) + .field("time_series_metric", TimeSeriesParams.MetricType.COUNTER) .endObject() .startObject("secret_soundtrack") .field("type", "alias") @@ -153,7 +171,7 @@ public void setUp() throws Exception { .endObject() .startObject("some_metric") .field("type", "long") - .field("time_series_metric", TimeSeriesParams.MetricType.gauge) + .field("time_series_metric", TimeSeriesParams.MetricType.GAUGE) .endObject() .endObject() .endObject() @@ -164,7 +182,17 @@ public void setUp() throws Exception { @Override protected Collection> nodePlugins() { - return List.of(TestMapperPlugin.class, ExceptionOnRewriteQueryPlugin.class); + return List.of(TestMapperPlugin.class, ExceptionOnRewriteQueryPlugin.class, BlockingOnRewriteQueryPlugin.class); + } + + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + + @Override + protected boolean ignoreExternalCluster() { + return true; } public void testFieldAlias() { @@ -368,7 +396,7 @@ public void testFieldMetricsAndDimensions() { assertTrue(response.get().get("some_dimension").get("keyword").isDimension()); assertNull(response.get().get("some_dimension").get("keyword").nonDimensionIndices()); assertTrue(response.get().containsKey("some_metric")); - assertEquals(TimeSeriesParams.MetricType.counter, response.get().get("some_metric").get("long").getMetricType()); + assertEquals(TimeSeriesParams.MetricType.COUNTER, response.get().get("some_metric").get("long").getMetricType()); assertNull(response.get().get("some_metric").get("long").metricConflictsIndices()); response = client().prepareFieldCaps("old_index", "new_index").setFields("some_dimension", "some_metric").get(); @@ -641,6 +669,52 @@ public void testManyIndicesWithSameMapping() { assertTrue(resp.getField("extra_field").get("integer").isAggregatable()); } + public void testCancel() throws Exception { + BlockingOnRewriteQueryBuilder.blockOnRewrite(); + PlainActionFuture future = PlainActionFuture.newFuture(); + Request restRequest = new Request("POST", "/_field_caps?fields=*"); + restRequest.setEntity(new StringEntity(""" + { + "index_filter": { + "blocking_query": {} + } + } + """, ContentType.APPLICATION_JSON.withCharset(StandardCharsets.UTF_8))); + Cancellable cancellable = getRestClient().performRequestAsync(restRequest, wrapAsRestResponseListener(future)); + logger.info("--> waiting for field-caps tasks to be started"); + assertBusy(() -> { + List tasks = client().admin() + .cluster() + .prepareListTasks() + .setActions("indices:data/read/field_caps", "indices:data/read/field_caps[n]") + .get() + .getTasks(); + assertThat(tasks.size(), greaterThanOrEqualTo(2)); + for (TaskInfo task : tasks) { + assertTrue(task.cancellable()); + assertFalse(task.cancelled()); + } + }, 30, TimeUnit.SECONDS); + + cancellable.cancel(); + logger.info("--> waiting for field-caps tasks to be cancelled"); + assertBusy(() -> { + List tasks = client().admin() + .cluster() + .prepareListTasks() + .setActions("indices:data/read/field_caps", "indices:data/read/field_caps[n]") + .get() + .getTasks(); + for (TaskInfo task : tasks) { + assertTrue(task.cancellable()); + assertTrue(task.cancelled()); + } + }, 30, TimeUnit.SECONDS); + + BlockingOnRewriteQueryBuilder.unblockOnRewrite(); + expectThrows(CancellationException.class, future::actionGet); + } + private 
void assertIndices(FieldCapabilitiesResponse response, String... indices) { assertNotNull(response.getIndices()); Arrays.sort(indices); @@ -680,7 +754,6 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws if (searchExecutionContext.indexMatches("*error*")) { throw new IllegalArgumentException("I throw because I choose to."); } - ; } return this; } @@ -691,6 +764,60 @@ public String getWriteableName() { } } + public static class BlockingOnRewriteQueryPlugin extends Plugin implements SearchPlugin { + + public BlockingOnRewriteQueryPlugin() {} + + @Override + public List> getQueries() { + return List.of( + new QuerySpec<>("blocking_query", BlockingOnRewriteQueryBuilder::new, BlockingOnRewriteQueryBuilder::fromXContent) + ); + } + } + + static class BlockingOnRewriteQueryBuilder extends DummyQueryBuilder { + private static CountDownLatch blockingLatch = new CountDownLatch(1); + public static final String NAME = "blocking_query"; + + BlockingOnRewriteQueryBuilder() { + + } + + BlockingOnRewriteQueryBuilder(StreamInput in) throws IOException { + super(in); + } + + static void blockOnRewrite() { + blockingLatch = new CountDownLatch(1); + } + + static void unblockOnRewrite() { + blockingLatch.countDown(); + } + + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { + try { + blockingLatch.await(); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + return this; + } + + public static BlockingOnRewriteQueryBuilder fromXContent(XContentParser parser) { + ObjectParser objectParser = new ObjectParser<>(NAME, BlockingOnRewriteQueryBuilder::new); + declareStandardFields(objectParser); + return objectParser.apply(parser, null); + } + + @Override + public String getWriteableName() { + return NAME; + } + } + public static final class TestMapperPlugin extends Plugin implements MapperPlugin { @Override public Map getMetadataMappers() { @@ -721,7 +848,13 @@ protected String contentType() { @Override public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - throw new UnsupportedOperationException(); + return new StringStoredFieldFieldLoader(name(), simpleName(), null) { + @Override + protected void write(XContentBuilder b, Object value) throws IOException { + BytesRef ref = (BytesRef) value; + b.utf8Value(ref.bytes, ref.offset, ref.length); + } + }; } private static final TypeParser PARSER = new FixedTypeParser(c -> new TestMetadataMapper()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java index 8a4761396d05..225d3218db23 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -28,7 +28,7 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.lookup.FieldLookup; -import org.elasticsearch.search.lookup.SourceLookup; +import org.elasticsearch.search.lookup.Source; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; @@ -53,6 +53,7 @@ import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.function.Function; +import java.util.function.Supplier; import static java.util.Collections.singleton; import static 
org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; @@ -142,7 +143,7 @@ static Object fieldsScript(Map vars, String fieldName) { static Object sourceScript(Map vars, String path) { @SuppressWarnings("unchecked") - Map source = ((SourceLookup) vars.get("_source")).source(); + Map source = ((Supplier) vars.get("_source")).get().source(); return XContentMapValues.extractValue(path, source); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java index e7e538e47fa3..85525e831da3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery.ScoreMode; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; @@ -771,27 +772,27 @@ public void testDateWithoutOrigin() throws Exception { ZonedDateTime docDate = dt.minusDays(1); String docDateString = docDate.getYear() + "-" - + formatted("%02d", docDate.getMonthValue()) + + Strings.format("%02d", docDate.getMonthValue()) + "-" - + formatted("%02d", docDate.getDayOfMonth()); + + Strings.format("%02d", docDate.getDayOfMonth()); client().index( indexRequest("test").id("1").source(jsonBuilder().startObject().field("test", "value").field("num1", docDateString).endObject()) ).actionGet(); docDate = dt.minusDays(2); docDateString = docDate.getYear() + "-" - + formatted("%02d", docDate.getMonthValue()) + + Strings.format("%02d", docDate.getMonthValue()) + "-" - + formatted("%02d", docDate.getDayOfMonth()); + + Strings.format("%02d", docDate.getDayOfMonth()); client().index( indexRequest("test").id("2").source(jsonBuilder().startObject().field("test", "value").field("num1", docDateString).endObject()) ).actionGet(); docDate = dt.minusDays(3); docDateString = docDate.getYear() + "-" - + formatted("%02d", docDate.getMonthValue()) + + Strings.format("%02d", docDate.getMonthValue()) + "-" - + formatted("%02d", docDate.getDayOfMonth()); + + Strings.format("%02d", docDate.getDayOfMonth()); client().index( indexRequest("test").id("3").source(jsonBuilder().startObject().field("test", "value").field("num1", docDateString).endObject()) ).actionGet(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java index 2c443924d8b4..ea6f45a21caf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java @@ -9,7 +9,7 @@ package org.elasticsearch.search.functionscore; import org.apache.lucene.search.Explanation; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; @@ -136,8 
+136,8 @@ public DecayFunction getDecayFunction() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_EMPTY; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.ZERO; } private static final DecayFunction decayFunction = new LinearMultScoreFunction(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/msearch/MultiSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/msearch/MultiSearchIT.java index ecca33f08d9d..47f3e0adf04f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/msearch/MultiSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/msearch/MultiSearchIT.java @@ -8,7 +8,7 @@ package org.elasticsearch.search.msearch; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.common.settings.Settings; @@ -96,8 +96,8 @@ public void testCCSCheckCompatibility() throws Exception { .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "yyy"))) .add(client().prepareSearch("test").setQuery(new DummyQueryBuilder() { @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.CURRENT; } })) .get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java index cc14f36d02c9..f63ce82d3490 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java @@ -46,6 +46,7 @@ import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.SuiteScopeTestCase @@ -690,7 +691,7 @@ public void testFilterByFilter() throws InterruptedException, IOException { .entry("delegate", "FilterByFilterAggregator") .entry( "delegate_debug", - matchesMap().entry("segments_with_deleted_docs", 0) + matchesMap().entry("segments_with_deleted_docs", greaterThanOrEqualTo(0)) .entry("segments_with_doc_count_field", 0) .entry("segments_counted", 0) .entry("segments_collected", greaterThan(0)) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java new file mode 100644 index 000000000000..8b4d986f224d --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java @@ -0,0 +1,139 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.profile.dfs; + +import org.apache.lucene.tests.util.English; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.profile.ProfileResult; +import org.elasticsearch.search.profile.SearchProfileDfsPhaseResult; +import org.elasticsearch.search.profile.SearchProfileShardResult; +import org.elasticsearch.search.profile.query.CollectorResult; +import org.elasticsearch.search.profile.query.QueryProfileShardResult; +import org.elasticsearch.search.vectors.KnnSearchBuilder; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xcontent.XContentFactory; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.emptyOrNullString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; + +public class DfsProfilerIT extends ESIntegTestCase { + + private static final int KNN_DIM = 3; + + public void testProfileDfs() throws Exception { + String textField = "text_field"; + String numericField = "number"; + String vectorField = "vector"; + String indexName = "text-dfs-profile"; + createIndex(indexName, vectorField); + ensureGreen(); + + int numDocs = randomIntBetween(10, 50); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex(indexName) + .setId(String.valueOf(i)) + .setSource( + textField, + English.intToEnglish(i), + numericField, + i, + vectorField, + new float[] { randomFloat(), randomFloat(), randomFloat() } + ); + } + indexRandom(true, docs); + refresh(); + int iters = between(5, 10); + for (int i = 0; i < iters; i++) { + QueryBuilder q = randomQueryBuilder(List.of(textField), List.of(numericField), numDocs, 3); + logger.info("Query: {}", q); + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setTrackTotalHits(true) + .setProfile(true) + .setSearchType(SearchType.DFS_QUERY_THEN_FETCH) + .setKnnSearch( + randomList( + 2, + 5, + () -> new KnnSearchBuilder( + vectorField, + new float[] { randomFloat(), randomFloat(), randomFloat() }, + randomIntBetween(5, 10), + 50 + ) + ) + ) + .get(); + + assertNotNull("Profile response element should not be null", resp.getProfileResults()); + assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + for (Map.Entry shard : resp.getProfileResults().entrySet()) { + for (QueryProfileShardResult searchProfiles : shard.getValue().getQueryProfileResults()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + } + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); + assertThat(result.getTime(), greaterThan(0L)); + } + SearchProfileDfsPhaseResult searchProfileDfsPhaseResult = shard.getValue().getSearchProfileDfsPhaseResult(); + 
assertThat(searchProfileDfsPhaseResult, is(notNullValue())); + for (QueryProfileShardResult queryProfileShardResult : searchProfileDfsPhaseResult.getQueryProfileShardResult()) { + for (ProfileResult result : queryProfileShardResult.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + } + CollectorResult result = queryProfileShardResult.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); + assertThat(result.getTime(), greaterThan(0L)); + assertThat(result.getTime(), greaterThan(0L)); + } + ProfileResult statsResult = searchProfileDfsPhaseResult.getDfsShardResult(); + assertThat(statsResult.getQueryName(), equalTo("statistics")); + } + } + } + + private void createIndex(String name, String vectorField) throws IOException { + assertAcked( + prepareCreate(name).setMapping( + XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject(vectorField) + .field("type", "dense_vector") + .field("dims", KNN_DIM) + .field("index", true) + .field("similarity", "cosine") + .endObject() + .endObject() + .endObject() + ) + ); + } + +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java index 90b09bcec27e..c8d1eaf8425f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java @@ -20,6 +20,7 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders; @@ -166,7 +167,7 @@ public void testIssue6614() throws ExecutionException, InterruptedException { "foo", "bar", "timeUpdated", - "2014/07/" + formatted("%02d", i + 1) + " " + formatted("%02d", j + 1) + ":00:00" + "2014/07/" + Strings.format("%02d", i + 1) + " " + Strings.format("%02d", j + 1) + ":00:00" ) ); } @@ -191,7 +192,7 @@ public void testIssue6614() throws ExecutionException, InterruptedException { .setQuery( QueryBuilders.boolQuery() .must(QueryBuilders.termQuery("foo", "bar")) - .must(QueryBuilders.rangeQuery("timeUpdated").gte("2014/" + formatted("%02d", randomIntBetween(1, 7)) + "/01")) + .must(QueryBuilders.rangeQuery("timeUpdated").gte("2014/" + Strings.format("%02d", randomIntBetween(1, 7)) + "/01")) ) .addSort(new FieldSortBuilder("timeUpdated").order(SortOrder.ASC).unmappedType("date")) .setSize(scaledRandomIntBetween(1, docs)) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java index 2273d46caf82..0b2ffae14377 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/SearchStatsIT.java @@ -25,7 +25,7 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; -import org.elasticsearch.search.lookup.SourceLookup; +import 
org.elasticsearch.search.lookup.Source; import org.elasticsearch.test.ESIntegTestCase; import java.util.Collection; @@ -34,6 +34,7 @@ import java.util.Map; import java.util.Set; import java.util.function.Function; +import java.util.function.Supplier; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; @@ -60,9 +61,10 @@ protected Collection> nodePlugins() { public static class CustomScriptPlugin extends MockScriptPlugin { @Override + @SuppressWarnings("unchecked") protected Map, Object>> pluginScripts() { return Collections.singletonMap("_source.field", vars -> { - Map src = ((SourceLookup) vars.get("_source")).source(); + Map src = ((Supplier) vars.get("_source")).get().source(); return src.get("field"); }); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java index 439401383e84..ed9e3ea9c30a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.FieldMemoryStats; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.core.Strings; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; @@ -1248,7 +1249,7 @@ private static void assertSuggestions( } } else { for (String expectedSuggestion : suggestions) { - String errMsg = String.format(Locale.ROOT, "Expected elem %s to be in list %s", expectedSuggestion, suggestionList); + String errMsg = Strings.format("Expected elem %s to be in list %s", expectedSuggestion, suggestionList); assertThat(errMsg, suggestionList, hasItem(expectedSuggestion)); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java index 9c656109dfd2..28c4b85df389 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java @@ -219,7 +219,7 @@ public void testDeletesAreBatched() throws Exception { logger.info("--> waiting for batched deletes to finish"); final PlainActionFuture> allDeletesDone = new PlainActionFuture<>(); - final ActionListener deletesListener = new GroupedActionListener<>(allDeletesDone, deleteFutures.size()); + final ActionListener deletesListener = new GroupedActionListener<>(deleteFutures.size(), allDeletesDone); for (StepListener deleteFuture : deleteFutures) { deleteFuture.addListener(deletesListener); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java index 696e0de5d557..3b5df51f19d6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -22,6 +22,7 @@ import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Strings; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.IndexMetaDataGenerations; import org.elasticsearch.repositories.Repository; @@ -39,7 +40,6 @@ import java.nio.file.StandardOpenOption; import java.util.Collections; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.function.Function; import java.util.stream.Collectors; @@ -289,7 +289,7 @@ public void testHandlingMissingRootLevelSnapshotMetadata() throws Exception { final SnapshotId snapshotToCorrupt = randomFrom(repositoryData.getSnapshotIds()); logger.info("--> delete root level snapshot metadata blob for snapshot [{}]", snapshotToCorrupt); - Files.delete(repo.resolve(String.format(Locale.ROOT, BlobStoreRepository.SNAPSHOT_NAME_FORMAT, snapshotToCorrupt.getUUID()))); + Files.delete(repo.resolve(Strings.format(BlobStoreRepository.SNAPSHOT_NAME_FORMAT, snapshotToCorrupt.getUUID()))); logger.info("--> strip version information from index-N blob"); final RepositoryData withoutVersions = new RepositoryData( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java index 266ef7d74f54..5c075966499e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.snapshots; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -215,8 +215,8 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.CURRENT; } @Override diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 964b7ea97632..85c453ad6be3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -51,6 +51,7 @@ import org.elasticsearch.rest.AbstractRestChannel; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestResponseUtils; import org.elasticsearch.rest.action.admin.cluster.RestClusterStateAction; import org.elasticsearch.rest.action.admin.cluster.RestGetRepositoriesAction; import org.elasticsearch.snapshots.mockstore.MockRepository; @@ -525,8 +526,9 @@ public void sendResponse(RestResponse response) { @Override public void sendResponse(RestResponse response) { try { - assertThat(response.content().utf8ToString(), containsString("notsecretusername")); - assertThat(response.content().utf8ToString(), not(containsString("verysecretpassword"))); + final var responseBody = RestResponseUtils.getBodyContent(response).utf8ToString(); + assertThat(responseBody, 
containsString("notsecretusername")); + assertThat(responseBody, not(containsString("verysecretpassword"))); } catch (AssertionError ex) { clusterStateError.set(ex); } finally { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java index f36e8e8e6e90..91c6ea3c5b92 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.threadpool.ThreadPool; @@ -29,8 +30,10 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.in; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; public class GetSnapshotsIT extends AbstractSnapshotIntegTestCase { @@ -578,6 +581,23 @@ public void testSortAfter() throws Exception { assertThat(paginatedResponse2.totalCount(), is(3)); } + public void testRetrievingSnapshotsWhenRepositoryIsMissing() throws Exception { + final String repoName = "test-repo"; + final Path repoPath = randomRepoPath(); + createRepository(repoName, "fs", repoPath); + final String missingRepoName = "missing"; + + final List snapshotNames = createNSnapshots(repoName, randomIntBetween(1, 10)); + snapshotNames.sort(String::compareTo); + + final GetSnapshotsResponse response = clusterAdmin().prepareGetSnapshots(repoName, missingRepoName) + .setSort(GetSnapshotsRequest.SortBy.NAME) + .get(); + assertThat(response.getSnapshots().stream().map(info -> info.snapshotId().getName()).toList(), equalTo(snapshotNames)); + assertTrue(response.getFailures().containsKey(missingRepoName)); + assertThat(response.getFailures().get(missingRepoName), instanceOf(RepositoryMissingException.class)); + } + // Create a snapshot that is guaranteed to have a unique start time and duration for tests around ordering by either. 
// Don't use this with more than 3 snapshots on platforms with low-resolution clocks as the durations could always collide there // causing an infinite loop diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java index 771cb87e34f3..5bc943fcc8be 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; @@ -274,6 +275,18 @@ public void testRepositoryConflict() throws Exception { logger.info("--> waiting for block to kick in on node [{}]", blockedNode); waitForBlock(blockedNode, repo); + assertTrue( + client().admin() + .cluster() + .prepareListTasks() + .setActions(DeleteSnapshotAction.NAME) + .setDetailed(true) + .get() + .getTasks() + .stream() + .anyMatch(ti -> ("[" + repo + "][" + snapshot1 + "]").equals(ti.description())) + ); + logger.info("--> try deleting the repository, should fail because the deletion of the snapshot is in progress"); RepositoryConflictException e1 = expectThrows( RepositoryConflictException.class, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryFilterUserMetadataIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryFilterUserMetadataIT.java deleted file mode 100644 index d89c38f323a8..000000000000 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryFilterUserMetadataIT.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.snapshots; - -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.env.Environment; -import org.elasticsearch.indices.recovery.RecoverySettings; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.RepositoryPlugin; -import org.elasticsearch.repositories.FinalizeSnapshotContext; -import org.elasticsearch.repositories.Repository; -import org.elasticsearch.repositories.SnapshotShardContext; -import org.elasticsearch.repositories.fs.FsRepository; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xcontent.NamedXContentRegistry; - -import java.util.Collection; -import java.util.Collections; -import java.util.Map; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.is; - -public class RepositoryFilterUserMetadataIT extends ESIntegTestCase { - - @Override - protected Collection> nodePlugins() { - return Collections.singleton(MetadataFilteringPlugin.class); - } - - public void testFilteredRepoMetadataIsUsed() { - final String masterName = internalCluster().getMasterName(); - final String repoName = "test-repo"; - assertAcked( - client().admin() - .cluster() - .preparePutRepository(repoName) - .setType(MetadataFilteringPlugin.TYPE) - .setSettings( - Settings.builder().put("location", randomRepoPath()).put(MetadataFilteringPlugin.MASTER_SETTING_VALUE, masterName) - ) - ); - createIndex("test-idx"); - final SnapshotInfo snapshotInfo = client().admin() - .cluster() - .prepareCreateSnapshot(repoName, "test-snap") - .setWaitForCompletion(true) - .get() - .getSnapshotInfo(); - assertThat(snapshotInfo.userMetadata(), is(Collections.singletonMap(MetadataFilteringPlugin.MOCK_FILTERED_META, masterName))); - } - - // Mock plugin that stores the name of the master node that started a snapshot in each snapshot's metadata - public static final class MetadataFilteringPlugin extends org.elasticsearch.plugins.Plugin implements RepositoryPlugin { - - private static final String MOCK_FILTERED_META = "mock_filtered_meta"; - - private static final String MASTER_SETTING_VALUE = "initial_master"; - - private static final String TYPE = "mock_meta_filtering"; - - @Override - public Map getRepositories( - Environment env, - NamedXContentRegistry namedXContentRegistry, - ClusterService clusterService, - BigArrays bigArrays, - RecoverySettings recoverySettings - ) { - return Collections.singletonMap( - "mock_meta_filtering", - metadata -> new FsRepository(metadata, env, namedXContentRegistry, clusterService, bigArrays, recoverySettings) { - - // Storing the initially expected metadata value here to verify that #filterUserMetadata is only called once on the - // initial master node starting the snapshot - private final String initialMetaValue = metadata.settings().get(MASTER_SETTING_VALUE); - - @Override - public void finalizeSnapshot(FinalizeSnapshotContext finalizeSnapshotContext) { - super.finalizeSnapshot(finalizeSnapshotContext); - } - - @Override - public void snapshotShard(SnapshotShardContext context) { - assertThat(context.userMetadata(), is(Collections.singletonMap(MOCK_FILTERED_META, initialMetaValue))); - super.snapshotShard(context); - } - - @Override - public Map adaptUserMetadata(Map userMetadata) { - return Collections.singletonMap(MOCK_FILTERED_META, clusterService.getNodeName()); - } - } - ); - } - } -} diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceIT.java index 36ecee336712..80b9c437a5dd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceIT.java @@ -67,7 +67,7 @@ public void testRepositoryIntegrityHealthIndicator() throws IOException, Interru } private void assertSnapshotRepositoryHealth(String message, Client client, HealthStatus status) { - var response = client.execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(randomBoolean())).actionGet(); + var response = client.execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(randomBoolean(), 1000)).actionGet(); assertThat(message, response.findIndicator(NAME).status(), equalTo(status)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java index 131ace6a3364..f07057c496ae 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java @@ -661,8 +661,8 @@ public void testConcurrentCreateAndStatusAPICalls() throws Exception { final var waitForCompletion = randomBoolean(); final var createsListener = new PlainActionFuture(); final var createsGroupedListener = new GroupedActionListener( - createsListener.map(ignored -> null), - snapshotNames.length + snapshotNames.length, + createsListener.map(ignored -> null) ); for (final var snapshotName : snapshotNames) { clusterAdmin().prepareCreateSnapshot(repoName, snapshotName) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/transport/RemoteClusterPortSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/transport/RemoteClusterPortSettingsIT.java new file mode 100644 index 000000000000..2543bcdfc5f6 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/transport/RemoteClusterPortSettingsIT.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.transport; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESIntegTestCase; + +import static org.elasticsearch.test.NodeRoles.dataOnlyNode; +import static org.elasticsearch.test.NodeRoles.masterNode; +import static org.hamcrest.Matchers.containsString; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) +public class RemoteClusterPortSettingsIT extends ESIntegTestCase { + + public void testDirectlyConfiguringTransportProfileForRemoteClusterWillFailToStartTheNode() { + assumeTrue("untrusted remote cluster feature flag must be enabled", TcpTransport.isUntrustedRemoteClusterEnabled()); + + internalCluster().setBootstrapMasterNodeIndex(0); + + final Settings.Builder builder = Settings.builder() + .put(randomBoolean() ? masterNode() : dataOnlyNode()) + .put("discovery.initial_state_timeout", "1s") + .put("remote_cluster.enabled", true); + + // Test that the same error message is always reported for direct usage of the _remote_cluster profile + switch (randomIntBetween(0, 2)) { + case 0 -> builder.put("transport.profiles._remote_cluster.tcp.keep_alive", true); + case 1 -> builder.put("transport.profiles._remote_cluster.port", 9900); + default -> builder.put("transport.profiles._remote_cluster.port", 9900) + .put("transport.profiles._remote_cluster.tcp.keep_alive", true); + } + + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> internalCluster().startNode(builder)); + assertThat( + e.getMessage(), + containsString( + "Remote Access settings should not be configured using the [_remote_cluster] profile. " + + "Use the [remote_cluster.] settings instead." + ) + ); + } + +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java b/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java index 052c61635bab..7004e79c0866 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java @@ -19,13 +19,13 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.test.ESIntegTestCase; import java.util.HashMap; import java.util.HashSet; -import java.util.Locale; import java.util.Map; import java.util.Random; import java.util.Set; @@ -471,7 +471,7 @@ public String next() { // zero-pad sequential logger.info("--> use zero-padded sequential ids"); yield new IDSource() { - final String zeroPad = String.format(Locale.ROOT, "%0" + TestUtil.nextInt(random, 4, 20) + "d", 0); + final String zeroPad = Strings.format("%0" + TestUtil.nextInt(random, 4, 20) + "d", 0); int upto; @Override diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index ca9f2c078871..51706347a38d 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -6,6 +6,8 @@ * Side Public License, v 1. */ +import org.elasticsearch.index.codec.tsdb.ES87TSDBDocValuesFormat; + /** The Elasticsearch Server Module. 
*/ module org.elasticsearch.server { requires java.logging; @@ -13,6 +15,7 @@ requires java.sql; requires java.management; requires jdk.unsupported; + requires java.net.http; // required by ingest-geoip's dependency maxmind.geoip2 https://github.com/elastic/elasticsearch/issues/93553 requires org.elasticsearch.cli; requires org.elasticsearch.base; @@ -22,8 +25,8 @@ requires org.elasticsearch.securesm; requires org.elasticsearch.xcontent; requires org.elasticsearch.logging; - requires org.elasticsearch.plugin.api; - requires org.elasticsearch.plugin.analysis.api; + requires org.elasticsearch.plugin; + requires org.elasticsearch.plugin.analysis; requires com.sun.jna; requires hppc; @@ -53,6 +56,7 @@ exports org.elasticsearch.action; exports org.elasticsearch.action.admin.cluster.allocation; exports org.elasticsearch.action.admin.cluster.configuration; + exports org.elasticsearch.action.admin.cluster.coordination; exports org.elasticsearch.action.admin.cluster.desirednodes; exports org.elasticsearch.action.admin.cluster.health; exports org.elasticsearch.action.admin.cluster.migration; @@ -228,6 +232,7 @@ exports org.elasticsearch.index.cache.query; exports org.elasticsearch.index.cache.request; exports org.elasticsearch.index.codec; + exports org.elasticsearch.index.codec.tsdb; exports org.elasticsearch.index.codec.bloomfilter; exports org.elasticsearch.index.engine; exports org.elasticsearch.index.fielddata; @@ -281,7 +286,6 @@ exports org.elasticsearch.monitor.os; exports org.elasticsearch.monitor.process; exports org.elasticsearch.node; - exports org.elasticsearch.reservedstate; exports org.elasticsearch.persistent; exports org.elasticsearch.persistent.decider; exports org.elasticsearch.plugins; @@ -290,6 +294,7 @@ exports org.elasticsearch.repositories; exports org.elasticsearch.repositories.blobstore; exports org.elasticsearch.repositories.fs; + exports org.elasticsearch.reservedstate; exports org.elasticsearch.rest; exports org.elasticsearch.rest.action; exports org.elasticsearch.rest.action.admin.cluster; @@ -367,5 +372,9 @@ uses org.elasticsearch.reservedstate.ReservedClusterStateHandlerProvider; - provides org.apache.lucene.codecs.PostingsFormat with org.elasticsearch.index.codec.bloomfilter.ES85BloomFilterPostingsFormat; + provides org.apache.lucene.codecs.PostingsFormat + with + org.elasticsearch.index.codec.bloomfilter.ES85BloomFilterPostingsFormat, + org.elasticsearch.index.codec.bloomfilter.ES87BloomFilterPostingsFormat; + provides org.apache.lucene.codecs.DocValuesFormat with ES87TSDBDocValuesFormat; } diff --git a/server/src/main/java/org/elasticsearch/Build.java b/server/src/main/java/org/elasticsearch/Build.java index 9cddfd504a0e..7279e0c4aa4c 100644 --- a/server/src/main/java/org/elasticsearch/Build.java +++ b/server/src/main/java/org/elasticsearch/Build.java @@ -151,7 +151,7 @@ static URL getElasticsearchCodeSourceLocation() { public static Build readBuild(StreamInput in) throws IOException { final Type type; // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know - if (in.getVersion().before(Version.V_8_3_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_3_0)) { // this was the flavor, which is always the default distribution now in.readString(); } @@ -167,7 +167,7 @@ public static Build readBuild(StreamInput in) throws IOException { } public static void writeBuild(Build build, StreamOutput out) throws IOException { - if (out.getVersion().before(Version.V_8_3_0)) { + if 
(out.getTransportVersion().before(TransportVersion.V_8_3_0)) { // this was the flavor, which is always the default distribution now out.writeString("default"); } diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 51718a226173..ded3862d47d2 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -275,7 +275,7 @@ public void writeTo(StreamOutput out) throws IOException { public static ElasticsearchException readException(StreamInput input, int id) throws IOException { CheckedFunction elasticsearchException = ID_TO_SUPPLIER.get(id); if (elasticsearchException == null) { - if (id == 127 && input.getVersion().before(Version.V_7_5_0)) { + if (id == 127 && input.getTransportVersion().before(TransportVersion.V_7_5_0)) { // was SearchContextException return new SearchException(input); } @@ -287,10 +287,10 @@ public static ElasticsearchException readException(StreamInput input, int id) th /** * Returns true iff the given class is a registered for an exception to be read. */ - public static boolean isRegistered(Class exception, Version version) { + public static boolean isRegistered(Class exception, TransportVersion version) { ElasticsearchExceptionHandle elasticsearchExceptionHandle = CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE.get(exception); if (elasticsearchExceptionHandle != null) { - return version.onOrAfter(elasticsearchExceptionHandle.versionAdded); + return version.onOrAfter(elasticsearchExceptionHandle.versionAdded.transportVersion); } return false; } @@ -1090,12 +1090,7 @@ private enum ElasticsearchExceptionHandle { 71, UNKNOWN_VERSION_ADDED ), - SEARCH_PARSE_EXCEPTION( - org.elasticsearch.search.SearchParseException.class, - org.elasticsearch.search.SearchParseException::new, - 72, - UNKNOWN_VERSION_ADDED - ), + // 72 was SearchParseException, only used in tests after 7.11 CONCURRENT_SNAPSHOT_EXECUTION_EXCEPTION( org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException.class, org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException::new, diff --git a/server/src/main/java/org/elasticsearch/TransportVersion.java b/server/src/main/java/org/elasticsearch/TransportVersion.java new file mode 100644 index 000000000000..8f986efe2721 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/TransportVersion.java @@ -0,0 +1,320 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.NavigableMap; +import java.util.Set; +import java.util.TreeMap; + +/** + * Represents the version of the wire protocol used to communicate between ES nodes. + *
<p>
+ * Prior to 8.7.0, the node {@link Version} was used everywhere. This class separates the wire protocol version + * from the running node version. Each node version has a reference to a specific transport version used by that node. + *
<p>
+ * Each transport version constant has an id number, which for versions prior to 8.7.0 is the same as the node version
+ * for backwards compatibility.
+ * There is also a unique id string. This is not actually used in the protocol, but ensures that each protocol version
+ * is only added to the source file once. The string needs to be unique (here a UUID, but any unique non-empty string works).
+ * If two concurrent PRs add the same protocol version, the duplicated unique string causes a git conflict, so whichever PR
+ * is merged second must be updated to the next free version. Without the unique id string, git would happily merge the two
+ * versions together, causing problems when you try to upgrade between those two PRs.
+ * <p>
+ * When adding new transport versions, it is recommended to leave a gap in the id number (say, 100)
+ * so that intermediate versions can be inserted later for any fixes that turn out to be needed.
+ * <p>
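A hedged illustration of the two conventions above; the constant name, id, and UUID here are invented for this example and are not part of the change:

    // A follow-up wire version: pick the next (or a deliberately gapped) id and generate a fresh UUID.
    // Reusing an existing UUID is caught by the uniqueness assertion in getAllVersionIds, and two
    // branches adding a version on the same line collide in git instead of merging silently.
    public static final TransportVersion V_EXAMPLE = new TransportVersion(8_07_01_99, "3f2504e0-4f89-41d3-9a0c-0305e82c3301");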
+ * The earliest compatible version is hardcoded at {@link #MINIMUM_COMPATIBLE}. Previously, this was dynamically calculated + * from the major/minor versions of {@link Version}, but {@code TransportVersion} does not have separate major/minor version numbers. + * So the minimum compatible version needs to be hard-coded as the transport version of the minimum compatible node version. + * That variable should be updated appropriately whenever we do a major version release. + */ +public class TransportVersion implements Comparable { + public static final TransportVersion ZERO = new TransportVersion(0, "00000000-0000-0000-0000-000000000000"); + public static final TransportVersion V_7_0_0 = new TransportVersion(7_00_00_99, "7505fd05-d982-43ce-a63f-ff4c6c8bdeec"); + public static final TransportVersion V_7_0_1 = new TransportVersion(7_00_01_99, "ae772780-e6f9-46a1-b0a0-20ed0cae37f7"); + public static final TransportVersion V_7_1_0 = new TransportVersion(7_01_00_99, "fd09007c-1c54-450a-af99-9f941e1a53c2"); + public static final TransportVersion V_7_1_1 = new TransportVersion(7_01_01_99, "f7ddb16c-3495-42ef-8d54-1461570ca68c"); + public static final TransportVersion V_7_2_0 = new TransportVersion(7_02_00_99, "b74dbc52-e727-472c-af21-2156482e8796"); + public static final TransportVersion V_7_2_1 = new TransportVersion(7_02_01_99, "a3217b94-f436-4aab-a020-162c83ba18f2"); + public static final TransportVersion V_7_3_0 = new TransportVersion(7_03_00_99, "4f04e4c9-c5aa-49e4-8b99-abeb4e284a5a"); + public static final TransportVersion V_7_3_1 = new TransportVersion(7_03_01_99, "532b9bc9-e11f-48a2-b997-67ca68ffb354"); + public static final TransportVersion V_7_3_2 = new TransportVersion(7_03_02_99, "60da3953-8415-4d4f-a18d-853c3e68ebd6"); + public static final TransportVersion V_7_4_0 = new TransportVersion(7_04_00_99, "ec7e58aa-55b4-4064-a9dd-fd723a2ba7a8"); + public static final TransportVersion V_7_4_1 = new TransportVersion(7_04_01_99, "a316c26d-8e6a-4608-b1ec-062331552b98"); + public static final TransportVersion V_7_4_2 = new TransportVersion(7_04_02_99, "031a77e1-3640-4c8a-80cf-28ded96bab48"); + public static final TransportVersion V_7_5_0 = new TransportVersion(7_05_00_99, "cc6e14dc-9dc7-4b74-8e15-1f99a6cfbe03"); + public static final TransportVersion V_7_5_1 = new TransportVersion(7_05_01_99, "9d12be44-16dc-44a8-a89a-45c9174ea596"); + public static final TransportVersion V_7_5_2 = new TransportVersion(7_05_02_99, "484ed9de-7f5b-4e6b-a79a-0cb5e7570093"); + public static final TransportVersion V_7_6_0 = new TransportVersion(7_06_00_99, "4637b8ae-f3df-43ae-a065-ad4c29f3373a"); + public static final TransportVersion V_7_6_1 = new TransportVersion(7_06_01_99, "fe5b9f95-a311-4a92-943b-30ec256a331c"); + public static final TransportVersion V_7_6_2 = new TransportVersion(7_06_02_99, "5396cb30-d91c-4789-85e8-77efd552c785"); + public static final TransportVersion V_7_7_0 = new TransportVersion(7_07_00_99, "7bb73c48-ddb8-4437-b184-30371c35dd4b"); + public static final TransportVersion V_7_7_1 = new TransportVersion(7_07_01_99, "85507b0f-0fca-4daf-a80b-451fe75e04a0"); + public static final TransportVersion V_7_8_0 = new TransportVersion(7_08_00_99, "c3cc74af-d15e-494b-a907-6ad6dd2f4660"); + public static final TransportVersion V_7_8_1 = new TransportVersion(7_08_01_99, "7acb9f6e-32f2-45ce-b87d-ca1f165b8e7a"); + public static final TransportVersion V_7_9_0 = new TransportVersion(7_09_00_99, "9388fe76-192a-4053-b51c-d2a7b8eae545"); + public static final TransportVersion V_7_9_1 = new 
TransportVersion(7_09_01_99, "30fa10fc-df6b-4435-bd9e-acdb9ae1b268"); + public static final TransportVersion V_7_9_2 = new TransportVersion(7_09_02_99, "b58bb181-cecc-464e-b955-f6c1c1e7b4d0"); + public static final TransportVersion V_7_9_3 = new TransportVersion(7_09_03_99, "4406926c-e2b6-4b9a-a72a-1bee8357ad3e"); + public static final TransportVersion V_7_10_0 = new TransportVersion(7_10_00_99, "4efca195-38e4-4f74-b877-c26fb2a40733"); + public static final TransportVersion V_7_10_1 = new TransportVersion(7_10_01_99, "0070260c-aa0b-4fc2-9c87-5cd5f23b005f"); + public static final TransportVersion V_7_10_2 = new TransportVersion(7_10_02_99, "b369e2ed-261c-4b2f-8b42-0f0ba0549f8c"); + public static final TransportVersion V_7_11_0 = new TransportVersion(7_11_00_99, "3b43bcbc-1c5e-4cc2-a3b4-8ac8b64239e8"); + public static final TransportVersion V_7_11_1 = new TransportVersion(7_11_01_99, "2f75d13c-adde-4762-a46e-def8acce62b7"); + public static final TransportVersion V_7_11_2 = new TransportVersion(7_11_02_99, "2c852a4b-236d-4e8b-9373-336c9b52685a"); + public static final TransportVersion V_7_12_0 = new TransportVersion(7_12_00_99, "3be9ff6f-2d9f-4fc2-ba91-394dd5ebcf33"); + public static final TransportVersion V_7_12_1 = new TransportVersion(7_12_01_99, "ee4fdfac-2039-4b00-b42d-579cbde7120c"); + public static final TransportVersion V_7_13_0 = new TransportVersion(7_13_00_99, "e1fe494a-7c66-4571-8f8f-1d7e6d8df1b3"); + public static final TransportVersion V_7_13_1 = new TransportVersion(7_13_01_99, "66bc8d82-36da-4d54-b22d-aca691dc3d70"); + public static final TransportVersion V_7_13_2 = new TransportVersion(7_13_02_99, "2a6fc74c-4c44-4264-a619-37437cd2c5a0"); + public static final TransportVersion V_7_13_3 = new TransportVersion(7_13_03_99, "a31592f5-f8d2-490c-a02e-da9501823d8d"); + public static final TransportVersion V_7_13_4 = new TransportVersion(7_13_04_99, "3143240d-1831-4186-8a19-963336c4cea0"); + public static final TransportVersion V_7_14_0 = new TransportVersion(7_14_00_99, "8cf0954c-b085-467f-b20b-3cb4b2e69e3e"); + public static final TransportVersion V_7_14_1 = new TransportVersion(7_14_01_99, "3dbb62c3-cf73-4c76-8d5a-4ca70afe2c70"); + public static final TransportVersion V_7_14_2 = new TransportVersion(7_14_02_99, "7943ae20-df60-45e5-97ba-82fc0dfc8b89"); + public static final TransportVersion V_7_15_0 = new TransportVersion(7_15_00_99, "2273ac0e-00bb-4024-9e2e-ab78981623c6"); + public static final TransportVersion V_7_15_1 = new TransportVersion(7_15_01_99, "a8c3503d-3452-45cf-b385-e855e16547fe"); + public static final TransportVersion V_7_15_2 = new TransportVersion(7_15_02_99, "fbb8ad69-02e2-4c90-b2e4-23947107f8b4"); + public static final TransportVersion V_7_16_0 = new TransportVersion(7_16_00_99, "59abadd2-25db-4547-a991-c92306a3934e"); + public static final TransportVersion V_7_16_1 = new TransportVersion(7_16_01_99, "4ace6b6b-8bba-427f-8755-9e3b40092138"); + public static final TransportVersion V_7_16_2 = new TransportVersion(7_16_02_99, "785567b9-b320-48ef-b538-1753228904cd"); + public static final TransportVersion V_7_16_3 = new TransportVersion(7_16_03_99, "facf5ae7-3d4e-479c-9142-72529b784e30"); + public static final TransportVersion V_7_17_0 = new TransportVersion(7_17_00_99, "322efe93-4c73-4e15-9274-bb76836c8fa8"); + public static final TransportVersion V_7_17_1 = new TransportVersion(7_17_01_99, "51c72842-7974-4669-ad25-bf13ba307307"); + public static final TransportVersion V_7_17_2 = new TransportVersion(7_17_02_99, "82bea8d0-bfea-47c2-b7d3-217d8feb67e3"); + public 
static final TransportVersion V_7_17_3 = new TransportVersion(7_17_03_99, "a909c2f4-5cb8-46bf-af0f-cd18d1b7e9d2"); + public static final TransportVersion V_7_17_4 = new TransportVersion(7_17_04_99, "5076e164-18a4-4373-8be7-15f1843c46db"); + public static final TransportVersion V_7_17_5 = new TransportVersion(7_17_05_99, "da7e3509-7f61-4dd2-8d23-a61f628a62f6"); + public static final TransportVersion V_7_17_6 = new TransportVersion(7_17_06_99, "a47ecf02-e457-474f-887d-ee15a7ebd969"); + public static final TransportVersion V_7_17_7 = new TransportVersion(7_17_07_99, "108ba576-bb28-42f4-bcbf-845a0ce52560"); + public static final TransportVersion V_7_17_8 = new TransportVersion(7_17_08_99, "82a3e70d-cf0e-4efb-ad16-6077ab9fe19f"); + public static final TransportVersion V_7_17_9 = new TransportVersion(7_17_09_99, "afd50dda-735f-4eae-9309-3218ffec1b2d"); + public static final TransportVersion V_7_17_10 = new TransportVersion(7_17_10_99, "18ae7108-6f7a-4205-adbb-cfcd6aa6ccc6"); + public static final TransportVersion V_8_0_0 = new TransportVersion(8_00_00_99, "c7d2372c-9f01-4a79-8b11-227d862dfe4f"); + public static final TransportVersion V_8_0_1 = new TransportVersion(8_00_01_99, "56e044c3-37e5-4f7e-bd38-f493927354ac"); + public static final TransportVersion V_8_1_0 = new TransportVersion(8_01_00_99, "3dc49dce-9cef-492a-ac8d-3cc79f6b4280"); + public static final TransportVersion V_8_1_1 = new TransportVersion(8_01_01_99, "40cf32e5-17b0-4187-9de1-022cdea69db9"); + public static final TransportVersion V_8_1_2 = new TransportVersion(8_01_02_99, "54aa6394-08f3-4db7-b82e-314ae4b5b562"); + public static final TransportVersion V_8_1_3 = new TransportVersion(8_01_03_99, "9772b54b-1e14-485f-92e8-8847b3a3d569"); + public static final TransportVersion V_8_2_0 = new TransportVersion(8_02_00_99, "8ce6d555-202e-47db-ab7d-ade9dda1b7e8"); + public static final TransportVersion V_8_2_1 = new TransportVersion(8_02_01_99, "ffbb67e8-cc33-4b02-a995-b461d9ee36c8"); + public static final TransportVersion V_8_2_2 = new TransportVersion(8_02_02_99, "2499ee77-187d-4e10-8366-8e60d5f03676"); + public static final TransportVersion V_8_2_3 = new TransportVersion(8_02_03_99, "046aae43-3090-4ece-8c27-8d489f097548"); + public static final TransportVersion V_8_3_0 = new TransportVersion(8_03_00_99, "559ddb66-d857-4208-bed5-a995ccf478ea"); + public static final TransportVersion V_8_3_1 = new TransportVersion(8_03_01_99, "31f9b136-dbbe-4fa1-b811-d6afa2a1b472"); + public static final TransportVersion V_8_3_2 = new TransportVersion(8_03_02_99, "f6e9cd4c-2a71-4f9b-80d4-7ba97ebd18b2"); + public static final TransportVersion V_8_3_3 = new TransportVersion(8_03_03_99, "a784de3e-533e-4844-8728-c55c6932dd8e"); + public static final TransportVersion V_8_4_0 = new TransportVersion(8_04_00_99, "c0d12906-aa5b-45d4-94c7-cbcf4d9818ca"); + public static final TransportVersion V_8_4_1 = new TransportVersion(8_04_01_99, "9a915f76-f259-4361-b53d-3f19c7797fd8"); + public static final TransportVersion V_8_4_2 = new TransportVersion(8_04_02_99, "87c5b7b2-0f57-4172-8a81-b9f9a0198525"); + public static final TransportVersion V_8_4_3 = new TransportVersion(8_04_03_99, "327cb1a0-9b5d-4be9-8033-285c2549f770"); + public static final TransportVersion V_8_5_0 = new TransportVersion(8_05_00_99, "be3d7f23-7240-4904-9d7f-e25a0f766eca"); + public static final TransportVersion V_8_5_1 = new TransportVersion(8_05_01_99, "d349d202-f01c-4dbb-85dd-947fb4267c99"); + public static final TransportVersion V_8_5_2 = new TransportVersion(8_05_02_99, 
"b68b1331-fd64-44d9-9e71-f6796ec2024c"); + public static final TransportVersion V_8_5_3 = new TransportVersion(8_05_03_99, "9ca3c835-e3b7-4622-a08e-d51e42403b06"); + public static final TransportVersion V_8_5_4 = new TransportVersion(8_05_04_99, "97ee525c-555d-45ca-83dc-59cd592c8e86"); + public static final TransportVersion V_8_6_0 = new TransportVersion(8_06_00_99, "e209c5ed-3488-4415-b561-33492ca3b789"); + public static final TransportVersion V_8_6_1 = new TransportVersion(8_06_01_99, "9f113acb-1b21-4fda-bef9-2a3e669b5c7b"); + public static final TransportVersion V_8_6_2 = new TransportVersion(8_06_02_99, "5a82fb68-b265-4a06-97c5-53496f823f51"); + public static final TransportVersion V_8_7_0 = new TransportVersion(8_07_00_99, "f1ee7a85-4fa6-43f5-8679-33e2b750448b"); + /* + * READ THE JAVADOC ABOVE BEFORE ADDING NEW TRANSPORT VERSIONS + * Detached transport versions added below here. Starts at ES major version 10 equivalent. + */ + // NOTE: DO NOT UNCOMMENT until all transport code uses TransportVersion + // public static final TransportVersion V_10_000_000 = new TransportVersion(10_000_000, "dc3cbf06-3ed5-4e1b-9978-ee1d04d235bc"); + /* + * When adding a new transport version, ensure there is a gap (say, 100) between versions + * This is to make it possible to add intermediate versions for any bug fixes that may be required. + * + * When adding versions for patch fixes, add numbers in the middle of the gap. This is to ensure there is always some space + * for patch fixes between any two versions. + */ + + /** Reference to the current transport version */ + public static final TransportVersion CURRENT = V_8_7_0; + + /** Reference to the earliest compatible transport version to this version of the codebase */ + // TODO: can we programmatically calculate or check this? Don't want to introduce circular ref between Version/TransportVersion + public static final TransportVersion MINIMUM_COMPATIBLE = V_7_17_0; + + static NavigableMap getAllVersionIds(Class cls) { + NavigableMap builder = new TreeMap<>(); + Map uniqueIds = new HashMap<>(); + + Set ignore = Set.of("ZERO", "CURRENT", "MINIMUM_COMPATIBLE"); + for (Field declaredField : cls.getFields()) { + if (declaredField.getType().equals(TransportVersion.class)) { + String fieldName = declaredField.getName(); + if (ignore.contains(fieldName)) { + continue; + } + try { + TransportVersion version = (TransportVersion) declaredField.get(null); + + TransportVersion maybePrevious = builder.put(version.id, version); + assert maybePrevious == null + : "expected [" + version.id + "] to be uniquely mapped but saw [" + maybePrevious + "] and [" + version + "]"; + + TransportVersion sameUniqueId = uniqueIds.put(version.uniqueId, version); + assert sameUniqueId == null + : "Versions " + + version + + " and " + + sameUniqueId + + " have the same unique id. 
Each TransportVersion should have a different unique id"; + } catch (IllegalAccessException e) { + assert false : "Version field [" + fieldName + "] should be public"; + } + } + } + + return Collections.unmodifiableNavigableMap(builder); + } + + private static final NavigableMap VERSION_IDS; + + static { + VERSION_IDS = getAllVersionIds(TransportVersion.class); + } + + public static TransportVersion readVersion(StreamInput in) throws IOException { + return fromId(in.readVInt()); + } + + public static TransportVersion fromId(int id) { + TransportVersion known = VERSION_IDS.get(id); + if (known != null) { + return known; + } + // this is a version we don't otherwise know about - just create a placeholder + return new TransportVersion(id, ""); + } + + public static void writeVersion(TransportVersion version, StreamOutput out) throws IOException { + out.writeVInt(version.id); + } + + /** + * Returns the minimum version of {@code version1} and {@code version2} + */ + public static TransportVersion min(TransportVersion version1, TransportVersion version2) { + return version1.id < version2.id ? version1 : version2; + } + + /** + * Returns the maximum version of {@code version1} and {@code version2} + */ + public static TransportVersion max(TransportVersion version1, TransportVersion version2) { + return version1.id > version2.id ? version1 : version2; + } + + /** + * returns a sorted collection of declared transport version constants + */ + public static Collection getAllVersions() { + return VERSION_IDS.values(); + } + + public final int id; + private final String uniqueId; + + TransportVersion(int id, String uniqueId) { + this.id = id; + this.uniqueId = Strings.requireNonEmpty(uniqueId, "Each TransportVersion needs a unique string id"); + } + + /** + * Placeholder method for code compatibility with code calling {@code CURRENT.minimumCompatibilityVersion}. + */ + @Deprecated(forRemoval = true) + public TransportVersion minimumCompatibilityVersion() { + assert this.equals(CURRENT) : "Should be CURRENT, but was: " + this; + return MINIMUM_COMPATIBLE; + } + + @Deprecated(forRemoval = true) + public boolean isCompatible(TransportVersion version) { + return onOrAfter(version.calculateMinimumCompatVersion()) && version.onOrAfter(calculateMinimumCompatVersion()); + } + + private TransportVersion minimumCompatibleVersion; + + /** + * Placeholder for code calling {@code minimumCompatibilityVersion} on arbitrary Version instances. + * Code calling this should be refactored to not do this. 
+ */ + @Deprecated(forRemoval = true) + public TransportVersion calculateMinimumCompatVersion() { + if (minimumCompatibleVersion == null) { + minimumCompatibleVersion = Version.findVersion(this).minimumCompatibilityVersion().transportVersion; + } + return minimumCompatibleVersion; + } + + public boolean after(TransportVersion version) { + return version.id < id; + } + + public boolean onOrAfter(TransportVersion version) { + return version.id <= id; + } + + public boolean before(TransportVersion version) { + return version.id > id; + } + + public boolean onOrBefore(TransportVersion version) { + return version.id >= id; + } + + @Override + public int compareTo(TransportVersion other) { + return Integer.compare(this.id, other.id); + } + + @Override + public String toString() { + return Integer.toString(id); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + TransportVersion version = (TransportVersion) o; + + if (id != version.id) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return id; + } + +} diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index e43664f53d72..5c29f5ba97a2 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -26,8 +26,12 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.NavigableMap; +import java.util.NoSuchElementException; import java.util.Objects; +import java.util.TreeMap; +@SuppressWarnings("checkstyle:linelength") public class Version implements Comparable, ToXContentFragment { /* * The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is alpha/beta/rc indicator AA @@ -46,95 +50,101 @@ public class Version implements Comparable, ToXContentFragment { */ public static final int V_EMPTY_ID = 0; - public static final Version V_EMPTY = new Version(V_EMPTY_ID, org.apache.lucene.util.Version.LATEST); - public static final Version V_7_0_0 = new Version(7_00_00_99, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final Version V_7_0_1 = new Version(7_00_01_99, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final Version V_7_1_0 = new Version(7_01_00_99, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final Version V_7_1_1 = new Version(7_01_01_99, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final Version V_7_2_0 = new Version(7_02_00_99, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final Version V_7_2_1 = new Version(7_02_01_99, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final Version V_7_3_0 = new Version(7_03_00_99, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final Version V_7_3_1 = new Version(7_03_01_99, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final Version V_7_3_2 = new Version(7_03_02_99, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final Version V_7_4_0 = new Version(7_04_00_99, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final Version V_7_4_1 = new Version(7_04_01_99, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final Version V_7_4_2 = new Version(7_04_02_99, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final Version V_7_5_0 = new Version(7_05_00_99, 
org.apache.lucene.util.Version.LUCENE_8_3_0); - public static final Version V_7_5_1 = new Version(7_05_01_99, org.apache.lucene.util.Version.LUCENE_8_3_0); - public static final Version V_7_5_2 = new Version(7_05_02_99, org.apache.lucene.util.Version.LUCENE_8_3_0); - public static final Version V_7_6_0 = new Version(7_06_00_99, org.apache.lucene.util.Version.LUCENE_8_4_0); - public static final Version V_7_6_1 = new Version(7_06_01_99, org.apache.lucene.util.Version.LUCENE_8_4_0); - public static final Version V_7_6_2 = new Version(7_06_02_99, org.apache.lucene.util.Version.LUCENE_8_4_0); - public static final Version V_7_7_0 = new Version(7_07_00_99, org.apache.lucene.util.Version.LUCENE_8_5_1); - public static final Version V_7_7_1 = new Version(7_07_01_99, org.apache.lucene.util.Version.LUCENE_8_5_1); - public static final Version V_7_8_0 = new Version(7_08_00_99, org.apache.lucene.util.Version.LUCENE_8_5_1); - public static final Version V_7_8_1 = new Version(7_08_01_99, org.apache.lucene.util.Version.LUCENE_8_5_1); - public static final Version V_7_9_0 = new Version(7_09_00_99, org.apache.lucene.util.Version.LUCENE_8_6_0); - public static final Version V_7_9_1 = new Version(7_09_01_99, org.apache.lucene.util.Version.LUCENE_8_6_2); - public static final Version V_7_9_2 = new Version(7_09_02_99, org.apache.lucene.util.Version.LUCENE_8_6_2); - public static final Version V_7_9_3 = new Version(7_09_03_99, org.apache.lucene.util.Version.LUCENE_8_6_2); - public static final Version V_7_10_0 = new Version(7_10_00_99, org.apache.lucene.util.Version.LUCENE_8_7_0); - public static final Version V_7_10_1 = new Version(7_10_01_99, org.apache.lucene.util.Version.LUCENE_8_7_0); - public static final Version V_7_10_2 = new Version(7_10_02_99, org.apache.lucene.util.Version.LUCENE_8_7_0); - public static final Version V_7_11_0 = new Version(7_11_00_99, org.apache.lucene.util.Version.LUCENE_8_7_0); - public static final Version V_7_11_1 = new Version(7_11_01_99, org.apache.lucene.util.Version.LUCENE_8_7_0); - public static final Version V_7_11_2 = new Version(7_11_02_99, org.apache.lucene.util.Version.LUCENE_8_7_0); - public static final Version V_7_12_0 = new Version(7_12_00_99, org.apache.lucene.util.Version.LUCENE_8_8_0); - public static final Version V_7_12_1 = new Version(7_12_01_99, org.apache.lucene.util.Version.LUCENE_8_8_0); - public static final Version V_7_13_0 = new Version(7_13_00_99, org.apache.lucene.util.Version.LUCENE_8_8_2); - public static final Version V_7_13_1 = new Version(7_13_01_99, org.apache.lucene.util.Version.LUCENE_8_8_2); - public static final Version V_7_13_2 = new Version(7_13_02_99, org.apache.lucene.util.Version.LUCENE_8_8_2); - public static final Version V_7_13_3 = new Version(7_13_03_99, org.apache.lucene.util.Version.LUCENE_8_8_2); - public static final Version V_7_13_4 = new Version(7_13_04_99, org.apache.lucene.util.Version.LUCENE_8_8_2); - public static final Version V_7_14_0 = new Version(7_14_00_99, org.apache.lucene.util.Version.LUCENE_8_9_0); - public static final Version V_7_14_1 = new Version(7_14_01_99, org.apache.lucene.util.Version.LUCENE_8_9_0); - public static final Version V_7_14_2 = new Version(7_14_02_99, org.apache.lucene.util.Version.LUCENE_8_9_0); - public static final Version V_7_15_0 = new Version(7_15_00_99, org.apache.lucene.util.Version.LUCENE_8_9_0); - public static final Version V_7_15_1 = new Version(7_15_01_99, org.apache.lucene.util.Version.LUCENE_8_9_0); - public static final Version V_7_15_2 = new Version(7_15_02_99, 
org.apache.lucene.util.Version.LUCENE_8_9_0); - public static final Version V_7_16_0 = new Version(7_16_00_99, org.apache.lucene.util.Version.LUCENE_8_10_1); - public static final Version V_7_16_1 = new Version(7_16_01_99, org.apache.lucene.util.Version.LUCENE_8_10_1); - public static final Version V_7_16_2 = new Version(7_16_02_99, org.apache.lucene.util.Version.LUCENE_8_10_1); - public static final Version V_7_16_3 = new Version(7_16_03_99, org.apache.lucene.util.Version.LUCENE_8_10_1); - public static final Version V_7_17_0 = new Version(7_17_00_99, org.apache.lucene.util.Version.LUCENE_8_11_1); - public static final Version V_7_17_1 = new Version(7_17_01_99, org.apache.lucene.util.Version.LUCENE_8_11_1); - public static final Version V_7_17_2 = new Version(7_17_02_99, org.apache.lucene.util.Version.LUCENE_8_11_1); - public static final Version V_7_17_3 = new Version(7_17_03_99, org.apache.lucene.util.Version.LUCENE_8_11_1); - public static final Version V_7_17_4 = new Version(7_17_04_99, org.apache.lucene.util.Version.LUCENE_8_11_1); - public static final Version V_7_17_5 = new Version(7_17_05_99, org.apache.lucene.util.Version.LUCENE_8_11_1); - public static final Version V_7_17_6 = new Version(7_17_06_99, org.apache.lucene.util.Version.LUCENE_8_11_1); - public static final Version V_7_17_7 = new Version(7_17_07_99, org.apache.lucene.util.Version.LUCENE_8_11_1); - public static final Version V_7_17_8 = new Version(7_17_08_99, org.apache.lucene.util.Version.LUCENE_8_11_1); - public static final Version V_8_0_0 = new Version(8_00_00_99, org.apache.lucene.util.Version.LUCENE_9_0_0); - public static final Version V_8_0_1 = new Version(8_00_01_99, org.apache.lucene.util.Version.LUCENE_9_0_0); - public static final Version V_8_1_0 = new Version(8_01_00_99, org.apache.lucene.util.Version.LUCENE_9_0_0); - public static final Version V_8_1_1 = new Version(8_01_01_99, org.apache.lucene.util.Version.LUCENE_9_0_0); - public static final Version V_8_1_2 = new Version(8_01_02_99, org.apache.lucene.util.Version.LUCENE_9_0_0); - public static final Version V_8_1_3 = new Version(8_01_03_99, org.apache.lucene.util.Version.LUCENE_9_0_0); - public static final Version V_8_2_0 = new Version(8_02_00_99, org.apache.lucene.util.Version.LUCENE_9_1_0); - public static final Version V_8_2_1 = new Version(8_02_01_99, org.apache.lucene.util.Version.LUCENE_9_1_0); - public static final Version V_8_2_2 = new Version(8_02_02_99, org.apache.lucene.util.Version.LUCENE_9_1_0); - public static final Version V_8_2_3 = new Version(8_02_03_99, org.apache.lucene.util.Version.LUCENE_9_1_0); - public static final Version V_8_3_0 = new Version(8_03_00_99, org.apache.lucene.util.Version.LUCENE_9_2_0); - public static final Version V_8_3_1 = new Version(8_03_01_99, org.apache.lucene.util.Version.LUCENE_9_2_0); - public static final Version V_8_3_2 = new Version(8_03_02_99, org.apache.lucene.util.Version.LUCENE_9_2_0); - public static final Version V_8_3_3 = new Version(8_03_03_99, org.apache.lucene.util.Version.LUCENE_9_2_0); - public static final Version V_8_4_0 = new Version(8_04_00_99, org.apache.lucene.util.Version.LUCENE_9_3_0); - public static final Version V_8_4_1 = new Version(8_04_01_99, org.apache.lucene.util.Version.LUCENE_9_3_0); - public static final Version V_8_4_2 = new Version(8_04_02_99, org.apache.lucene.util.Version.LUCENE_9_3_0); - public static final Version V_8_4_3 = new Version(8_04_03_99, org.apache.lucene.util.Version.LUCENE_9_3_0); - public static final Version V_8_5_0 = new Version(8_05_00_99, 
org.apache.lucene.util.Version.LUCENE_9_4_1); - public static final Version V_8_5_1 = new Version(8_05_01_99, org.apache.lucene.util.Version.LUCENE_9_4_1); - public static final Version V_8_5_2 = new Version(8_05_02_99, org.apache.lucene.util.Version.LUCENE_9_4_1); - public static final Version V_8_6_0 = new Version(8_06_00_99, org.apache.lucene.util.Version.LUCENE_9_4_1); - public static final Version V_8_7_0 = new Version(8_07_00_99, org.apache.lucene.util.Version.LUCENE_9_4_1); + public static final Version V_EMPTY = new Version(V_EMPTY_ID, TransportVersion.ZERO, org.apache.lucene.util.Version.LATEST); + public static final Version V_7_0_0 = new Version(7_00_00_99, TransportVersion.V_7_0_0, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final Version V_7_0_1 = new Version(7_00_01_99, TransportVersion.V_7_0_1, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final Version V_7_1_0 = new Version(7_01_00_99, TransportVersion.V_7_1_0, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final Version V_7_1_1 = new Version(7_01_01_99, TransportVersion.V_7_1_1, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final Version V_7_2_0 = new Version(7_02_00_99, TransportVersion.V_7_2_0, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final Version V_7_2_1 = new Version(7_02_01_99, TransportVersion.V_7_2_1, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final Version V_7_3_0 = new Version(7_03_00_99, TransportVersion.V_7_3_0, org.apache.lucene.util.Version.LUCENE_8_1_0); + public static final Version V_7_3_1 = new Version(7_03_01_99, TransportVersion.V_7_3_1, org.apache.lucene.util.Version.LUCENE_8_1_0); + public static final Version V_7_3_2 = new Version(7_03_02_99, TransportVersion.V_7_3_2, org.apache.lucene.util.Version.LUCENE_8_1_0); + public static final Version V_7_4_0 = new Version(7_04_00_99, TransportVersion.V_7_4_0, org.apache.lucene.util.Version.LUCENE_8_2_0); + public static final Version V_7_4_1 = new Version(7_04_01_99, TransportVersion.V_7_4_1, org.apache.lucene.util.Version.LUCENE_8_2_0); + public static final Version V_7_4_2 = new Version(7_04_02_99, TransportVersion.V_7_4_2, org.apache.lucene.util.Version.LUCENE_8_2_0); + public static final Version V_7_5_0 = new Version(7_05_00_99, TransportVersion.V_7_5_0, org.apache.lucene.util.Version.LUCENE_8_3_0); + public static final Version V_7_5_1 = new Version(7_05_01_99, TransportVersion.V_7_5_1, org.apache.lucene.util.Version.LUCENE_8_3_0); + public static final Version V_7_5_2 = new Version(7_05_02_99, TransportVersion.V_7_5_2, org.apache.lucene.util.Version.LUCENE_8_3_0); + public static final Version V_7_6_0 = new Version(7_06_00_99, TransportVersion.V_7_6_0, org.apache.lucene.util.Version.LUCENE_8_4_0); + public static final Version V_7_6_1 = new Version(7_06_01_99, TransportVersion.V_7_6_1, org.apache.lucene.util.Version.LUCENE_8_4_0); + public static final Version V_7_6_2 = new Version(7_06_02_99, TransportVersion.V_7_6_2, org.apache.lucene.util.Version.LUCENE_8_4_0); + public static final Version V_7_7_0 = new Version(7_07_00_99, TransportVersion.V_7_7_0, org.apache.lucene.util.Version.LUCENE_8_5_1); + public static final Version V_7_7_1 = new Version(7_07_01_99, TransportVersion.V_7_7_1, org.apache.lucene.util.Version.LUCENE_8_5_1); + public static final Version V_7_8_0 = new Version(7_08_00_99, TransportVersion.V_7_8_0, org.apache.lucene.util.Version.LUCENE_8_5_1); + public static final Version V_7_8_1 = new Version(7_08_01_99, 
TransportVersion.V_7_8_1, org.apache.lucene.util.Version.LUCENE_8_5_1); + public static final Version V_7_9_0 = new Version(7_09_00_99, TransportVersion.V_7_9_0, org.apache.lucene.util.Version.LUCENE_8_6_0); + public static final Version V_7_9_1 = new Version(7_09_01_99, TransportVersion.V_7_9_1, org.apache.lucene.util.Version.LUCENE_8_6_2); + public static final Version V_7_9_2 = new Version(7_09_02_99, TransportVersion.V_7_9_2, org.apache.lucene.util.Version.LUCENE_8_6_2); + public static final Version V_7_9_3 = new Version(7_09_03_99, TransportVersion.V_7_9_3, org.apache.lucene.util.Version.LUCENE_8_6_2); + public static final Version V_7_10_0 = new Version(7_10_00_99, TransportVersion.V_7_10_0, org.apache.lucene.util.Version.LUCENE_8_7_0); + public static final Version V_7_10_1 = new Version(7_10_01_99, TransportVersion.V_7_10_1, org.apache.lucene.util.Version.LUCENE_8_7_0); + public static final Version V_7_10_2 = new Version(7_10_02_99, TransportVersion.V_7_10_2, org.apache.lucene.util.Version.LUCENE_8_7_0); + public static final Version V_7_11_0 = new Version(7_11_00_99, TransportVersion.V_7_11_0, org.apache.lucene.util.Version.LUCENE_8_7_0); + public static final Version V_7_11_1 = new Version(7_11_01_99, TransportVersion.V_7_11_1, org.apache.lucene.util.Version.LUCENE_8_7_0); + public static final Version V_7_11_2 = new Version(7_11_02_99, TransportVersion.V_7_11_2, org.apache.lucene.util.Version.LUCENE_8_7_0); + public static final Version V_7_12_0 = new Version(7_12_00_99, TransportVersion.V_7_12_0, org.apache.lucene.util.Version.LUCENE_8_8_0); + public static final Version V_7_12_1 = new Version(7_12_01_99, TransportVersion.V_7_12_1, org.apache.lucene.util.Version.LUCENE_8_8_0); + public static final Version V_7_13_0 = new Version(7_13_00_99, TransportVersion.V_7_13_0, org.apache.lucene.util.Version.LUCENE_8_8_2); + public static final Version V_7_13_1 = new Version(7_13_01_99, TransportVersion.V_7_13_1, org.apache.lucene.util.Version.LUCENE_8_8_2); + public static final Version V_7_13_2 = new Version(7_13_02_99, TransportVersion.V_7_13_2, org.apache.lucene.util.Version.LUCENE_8_8_2); + public static final Version V_7_13_3 = new Version(7_13_03_99, TransportVersion.V_7_13_3, org.apache.lucene.util.Version.LUCENE_8_8_2); + public static final Version V_7_13_4 = new Version(7_13_04_99, TransportVersion.V_7_13_4, org.apache.lucene.util.Version.LUCENE_8_8_2); + public static final Version V_7_14_0 = new Version(7_14_00_99, TransportVersion.V_7_14_0, org.apache.lucene.util.Version.LUCENE_8_9_0); + public static final Version V_7_14_1 = new Version(7_14_01_99, TransportVersion.V_7_14_1, org.apache.lucene.util.Version.LUCENE_8_9_0); + public static final Version V_7_14_2 = new Version(7_14_02_99, TransportVersion.V_7_14_2, org.apache.lucene.util.Version.LUCENE_8_9_0); + public static final Version V_7_15_0 = new Version(7_15_00_99, TransportVersion.V_7_15_0, org.apache.lucene.util.Version.LUCENE_8_9_0); + public static final Version V_7_15_1 = new Version(7_15_01_99, TransportVersion.V_7_15_1, org.apache.lucene.util.Version.LUCENE_8_9_0); + public static final Version V_7_15_2 = new Version(7_15_02_99, TransportVersion.V_7_15_2, org.apache.lucene.util.Version.LUCENE_8_9_0); + public static final Version V_7_16_0 = new Version(7_16_00_99, TransportVersion.V_7_16_0, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_7_16_1 = new Version(7_16_01_99, TransportVersion.V_7_16_1, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_7_16_2 
= new Version(7_16_02_99, TransportVersion.V_7_16_2, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_7_16_3 = new Version(7_16_03_99, TransportVersion.V_7_16_3, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_7_17_0 = new Version(7_17_00_99, TransportVersion.V_7_17_0, org.apache.lucene.util.Version.LUCENE_8_11_1); + public static final Version V_7_17_1 = new Version(7_17_01_99, TransportVersion.V_7_17_1, org.apache.lucene.util.Version.LUCENE_8_11_1); + public static final Version V_7_17_2 = new Version(7_17_02_99, TransportVersion.V_7_17_2, org.apache.lucene.util.Version.LUCENE_8_11_1); + public static final Version V_7_17_3 = new Version(7_17_03_99, TransportVersion.V_7_17_3, org.apache.lucene.util.Version.LUCENE_8_11_1); + public static final Version V_7_17_4 = new Version(7_17_04_99, TransportVersion.V_7_17_4, org.apache.lucene.util.Version.LUCENE_8_11_1); + public static final Version V_7_17_5 = new Version(7_17_05_99, TransportVersion.V_7_17_5, org.apache.lucene.util.Version.LUCENE_8_11_1); + public static final Version V_7_17_6 = new Version(7_17_06_99, TransportVersion.V_7_17_6, org.apache.lucene.util.Version.LUCENE_8_11_1); + public static final Version V_7_17_7 = new Version(7_17_07_99, TransportVersion.V_7_17_7, org.apache.lucene.util.Version.LUCENE_8_11_1); + public static final Version V_7_17_8 = new Version(7_17_08_99, TransportVersion.V_7_17_8, org.apache.lucene.util.Version.LUCENE_8_11_1); + public static final Version V_7_17_9 = new Version(7_17_09_99, TransportVersion.V_7_17_9, org.apache.lucene.util.Version.LUCENE_8_11_1); + public static final Version V_7_17_10 = new Version(7_17_10_99, TransportVersion.V_7_17_10, org.apache.lucene.util.Version.LUCENE_8_11_1); + public static final Version V_8_0_0 = new Version(8_00_00_99, TransportVersion.V_8_0_0, org.apache.lucene.util.Version.LUCENE_9_0_0); + public static final Version V_8_0_1 = new Version(8_00_01_99, TransportVersion.V_8_0_1, org.apache.lucene.util.Version.LUCENE_9_0_0); + public static final Version V_8_1_0 = new Version(8_01_00_99, TransportVersion.V_8_1_0, org.apache.lucene.util.Version.LUCENE_9_0_0); + public static final Version V_8_1_1 = new Version(8_01_01_99, TransportVersion.V_8_1_1, org.apache.lucene.util.Version.LUCENE_9_0_0); + public static final Version V_8_1_2 = new Version(8_01_02_99, TransportVersion.V_8_1_2, org.apache.lucene.util.Version.LUCENE_9_0_0); + public static final Version V_8_1_3 = new Version(8_01_03_99, TransportVersion.V_8_1_3, org.apache.lucene.util.Version.LUCENE_9_0_0); + public static final Version V_8_2_0 = new Version(8_02_00_99, TransportVersion.V_8_2_0, org.apache.lucene.util.Version.LUCENE_9_1_0); + public static final Version V_8_2_1 = new Version(8_02_01_99, TransportVersion.V_8_2_1, org.apache.lucene.util.Version.LUCENE_9_1_0); + public static final Version V_8_2_2 = new Version(8_02_02_99, TransportVersion.V_8_2_2, org.apache.lucene.util.Version.LUCENE_9_1_0); + public static final Version V_8_2_3 = new Version(8_02_03_99, TransportVersion.V_8_2_3, org.apache.lucene.util.Version.LUCENE_9_1_0); + public static final Version V_8_3_0 = new Version(8_03_00_99, TransportVersion.V_8_3_0, org.apache.lucene.util.Version.LUCENE_9_2_0); + public static final Version V_8_3_1 = new Version(8_03_01_99, TransportVersion.V_8_3_1, org.apache.lucene.util.Version.LUCENE_9_2_0); + public static final Version V_8_3_2 = new Version(8_03_02_99, TransportVersion.V_8_3_2, org.apache.lucene.util.Version.LUCENE_9_2_0); + public static 
final Version V_8_3_3 = new Version(8_03_03_99, TransportVersion.V_8_3_3, org.apache.lucene.util.Version.LUCENE_9_2_0); + public static final Version V_8_4_0 = new Version(8_04_00_99, TransportVersion.V_8_4_0, org.apache.lucene.util.Version.LUCENE_9_3_0); + public static final Version V_8_4_1 = new Version(8_04_01_99, TransportVersion.V_8_4_1, org.apache.lucene.util.Version.LUCENE_9_3_0); + public static final Version V_8_4_2 = new Version(8_04_02_99, TransportVersion.V_8_4_2, org.apache.lucene.util.Version.LUCENE_9_3_0); + public static final Version V_8_4_3 = new Version(8_04_03_99, TransportVersion.V_8_4_3, org.apache.lucene.util.Version.LUCENE_9_3_0); + public static final Version V_8_5_0 = new Version(8_05_00_99, TransportVersion.V_8_5_0, org.apache.lucene.util.Version.LUCENE_9_4_1); + public static final Version V_8_5_1 = new Version(8_05_01_99, TransportVersion.V_8_5_1, org.apache.lucene.util.Version.LUCENE_9_4_1); + public static final Version V_8_5_2 = new Version(8_05_02_99, TransportVersion.V_8_5_2, org.apache.lucene.util.Version.LUCENE_9_4_1); + public static final Version V_8_5_3 = new Version(8_05_03_99, TransportVersion.V_8_5_3, org.apache.lucene.util.Version.LUCENE_9_4_2); + public static final Version V_8_6_0 = new Version(8_06_00_99, TransportVersion.V_8_6_0, org.apache.lucene.util.Version.LUCENE_9_4_2); + public static final Version V_8_6_1 = new Version(8_06_01_99, TransportVersion.V_8_6_1, org.apache.lucene.util.Version.LUCENE_9_4_2); + public static final Version V_8_6_2 = new Version(8_06_02_99, TransportVersion.V_8_6_2, org.apache.lucene.util.Version.LUCENE_9_4_2); + public static final Version V_8_7_0 = new Version(8_07_00_99, TransportVersion.V_8_7_0, org.apache.lucene.util.Version.LUCENE_9_5_0); + public static final Version CURRENT = V_8_7_0; - private static final Map idToVersion; - private static final Map stringToVersion; + private static final NavigableMap VERSION_IDS; + private static final Map VERSION_STRINGS; static { - final Map builder = new HashMap<>(); + final NavigableMap builder = new TreeMap<>(); final Map builderByString = new HashMap<>(); for (final Field declaredField : Version.class.getFields()) { @@ -176,16 +186,30 @@ public class Version implements Comparable, ToXContentFragment { + "]"; builder.put(V_EMPTY_ID, V_EMPTY); builderByString.put(V_EMPTY.toString(), V_EMPTY); - idToVersion = Map.copyOf(builder); - stringToVersion = Map.copyOf(builderByString); + + VERSION_IDS = Collections.unmodifiableNavigableMap(builder); + VERSION_STRINGS = Map.copyOf(builderByString); } public static Version readVersion(StreamInput in) throws IOException { return fromId(in.readVInt()); } + /** + * Returns the highest Version that has this or a lesser TransportVersion. + */ + @Deprecated + static Version findVersion(TransportVersion transportVersion) { + return VERSION_IDS.descendingMap() + .values() + .stream() + .filter(v -> v.transportVersion.compareTo(transportVersion) <= 0) + .findFirst() + .orElseThrow(() -> new NoSuchElementException("No valid Version found")); // only if transportVersion < 0 ????? + } + public static Version fromId(int id) { - final Version known = idToVersion.get(id); + final Version known = VERSION_IDS.get(id); if (known != null) { return known; } @@ -197,7 +221,7 @@ private static Version fromIdSlow(int id) { // Our best guess is to use the same Lucene version as the previous // version in the list, assuming that it didn't change. 
List versions = DeclaredVersionsHolder.DECLARED_VERSIONS; - Version tmp = new Version(id, org.apache.lucene.util.Version.LATEST); + Version tmp = new Version(id, TransportVersion.CURRENT, org.apache.lucene.util.Version.LATEST); int index = Collections.binarySearch(versions, tmp); if (index < 0) { index = -2 - index; @@ -213,7 +237,8 @@ private static Version fromIdSlow(int id) { } else { luceneVersion = versions.get(index).luceneVersion; } - return new Version(id, luceneVersion); + // TODO: assume this is an old version that has transport version == release version + return new Version(id, TransportVersion.fromId(id), luceneVersion); } public static void writeVersion(Version version, StreamOutput out) throws IOException { @@ -221,14 +246,14 @@ public static void writeVersion(Version version, StreamOutput out) throws IOExce } /** - * Returns the minimum version between the 2. + * Returns the minimum version of {@code version1} and {@code version2} */ public static Version min(Version version1, Version version2) { return version1.id < version2.id ? version1 : version2; } /** - * Returns the maximum version between the 2 + * Returns the maximum version of {@code version1} and {@code version2} */ public static Version max(Version version1, Version version2) { return version1.id > version2.id ? version1 : version2; @@ -241,7 +266,7 @@ public static Version fromString(String version) { if (Strings.hasLength(version) == false) { return Version.CURRENT; } - final Version cached = stringToVersion.get(version); + final Version cached = VERSION_STRINGS.get(version); if (cached != null) { return cached; } @@ -296,16 +321,18 @@ private static Version fromStringSlow(String version) { public final byte minor; public final byte revision; public final byte build; + public final TransportVersion transportVersion; public final org.apache.lucene.util.Version luceneVersion; private final String toString; private final int previousMajorId; - Version(int id, org.apache.lucene.util.Version luceneVersion) { + Version(int id, TransportVersion transportVersion, org.apache.lucene.util.Version luceneVersion) { this.id = id; this.major = (byte) ((id / 1000000) % 100); this.minor = (byte) ((id / 10000) % 100); this.revision = (byte) ((id / 100) % 100); this.build = (byte) (id % 100); + this.transportVersion = Objects.requireNonNull(transportVersion); this.luceneVersion = Objects.requireNonNull(luceneVersion); this.toString = major + "." + minor + "." + revision; this.previousMajorId = major > 0 ? 
(major - 1) * 1000000 + 99 : major; diff --git a/server/src/main/java/org/elasticsearch/action/ActionListener.java b/server/src/main/java/org/elasticsearch/action/ActionListener.java index 1b918186009c..dd8b629f208c 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/ActionListener.java @@ -8,14 +8,19 @@ package org.elasticsearch.action; +import org.elasticsearch.Assertions; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.CheckedRunnable; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import java.util.function.Consumer; @@ -251,6 +256,13 @@ public String toString() { } } + /** + * Creates a listener which releases the given resource on completion (whether success or failure) + */ + static ActionListener releasing(Releasable releasable) { + return assertOnce(wrap(runnableFromReleasable(releasable))); + } + /** * Creates a listener that listens for a response (or failure) and executes the * corresponding runnable when the response (or failure) is received. @@ -289,6 +301,29 @@ public String toString() { }; } + /** + * Adds a wrapper around a listener which catches exceptions thrown by its {@link #onResponse} method and feeds them to its + * {@link #onFailure}. + */ + static ActionListener wrap(ActionListener delegate) { + return new ActionListener<>() { + @Override + public void onResponse(Response response) { + ActionListener.run(delegate, l -> l.onResponse(response)); + } + + @Override + public void onFailure(Exception e) { + delegate.onFailure(e); + } + + @Override + public String toString() { + return "wrapped{" + delegate + "}"; + } + }; + } + /** * Notifies every given listener with the response passed to {@link #onResponse(Object)}. If a listener itself throws an exception * the exception is forwarded to {@link #onFailure(Exception)}. If in turn {@link #onFailure(Exception)} fails all remaining @@ -331,7 +366,15 @@ static void onFailure(Iterable> listeners, E * callback when the listener is notified via either {@code #onResponse} or {@code #onFailure}. */ static ActionListener runAfter(ActionListener delegate, Runnable runAfter) { - return new RunAfterActionListener<>(delegate, runAfter); + return assertOnce(new RunAfterActionListener<>(delegate, runAfter)); + } + + /** + * Wraps a given listener and returns a new listener which releases the provided {@code releaseAfter} + * resource when the listener is notified via either {@code #onResponse} or {@code #onFailure}. + */ + static ActionListener releaseAfter(ActionListener delegate, Releasable releaseAfter) { + return assertOnce(new RunAfterActionListener<>(delegate, runnableFromReleasable(releaseAfter))); } final class RunAfterActionListener extends Delegating { @@ -374,7 +417,7 @@ public String toString() { * not be executed. 
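The ActionListener helpers added in this file (releasing, releaseAfter, the assertOnce wrapper and the rewritten notifyOnce) are meant to compose; a rough sketch of typical usage, with the listener and resource names invented for illustration:

    // A listener that just releases a resource on completion (success or failure);
    // releasing() wraps it in assertOnce(), so a double completion trips an assertion in test builds.
    var closeOnDone = ActionListener.releasing(searcherReference);

    // Keep an existing listener but release the resource after it has been notified.
    var listener = ActionListener.releaseAfter(delegate, searcherReference);

    // Guarantee at most one notification without subclassing the now-removed NotifyOnceListener.
    var once = ActionListener.notifyOnce(listener);

    // Run an action and route any exception it throws to the listener's onFailure.
    ActionListener.run(once, l -> shardOperation(request, l));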
*/ static ActionListener runBefore(ActionListener delegate, CheckedRunnable runBefore) { - return new RunBeforeActionListener<>(delegate, runBefore); + return assertOnce(new RunBeforeActionListener<>(delegate, runBefore)); } final class RunBeforeActionListener extends Delegating { @@ -418,15 +461,27 @@ public String toString() { * and {@link #onFailure(Exception)} of the provided listener will be called at most once. */ static ActionListener notifyOnce(ActionListener delegate) { - return new NotifyOnceListener() { + final var delegateRef = new AtomicReference<>(delegate); + return new ActionListener<>() { @Override - protected void innerOnResponse(Response response) { - delegate.onResponse(response); + public void onResponse(Response response) { + final var acquired = delegateRef.getAndSet(null); + if (acquired != null) { + acquired.onResponse(response); + } } @Override - protected void innerOnFailure(Exception e) { - delegate.onFailure(e); + public void onFailure(Exception e) { + final var acquired = delegateRef.getAndSet(null); + if (acquired != null) { + acquired.onFailure(e); + } + } + + @Override + public String toString() { + return "notifyOnce[" + delegateRef.get() + "]"; } }; } @@ -458,4 +513,61 @@ static void completeWith(ActionListener listener, CheckedSu throw ex; } } + + private static Runnable runnableFromReleasable(Releasable releasable) { + return new Runnable() { + @Override + public void run() { + Releasables.closeExpectNoException(releasable); + } + + @Override + public String toString() { + return "release[" + releasable + "]"; + } + }; + } + + static ActionListener assertOnce(ActionListener delegate) { + if (Assertions.ENABLED) { + return new ActionListener<>() { + + // if complete, records the stack trace which first completed it + private final AtomicReference firstCompletion = new AtomicReference<>(); + + private void assertFirstRun() { + var previousRun = firstCompletion.compareAndExchange(null, new ElasticsearchException(delegate.toString())); + assert previousRun == null : previousRun; // reports the stack traces of both completions + } + + @Override + public void onResponse(Response response) { + assertFirstRun(); + delegate.onResponse(response); + } + + @Override + public void onFailure(Exception e) { + assertFirstRun(); + delegate.onFailure(e); + } + + @Override + public String toString() { + return delegate.toString(); + } + }; + } else { + return delegate; + } + } + + static > void run(L listener, CheckedConsumer action) { + try { + action.accept(listener); + } catch (Exception e) { + listener.onFailure(e); + } + } + } diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 172654fc915a..59d055e27415 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -41,6 +41,7 @@ import org.elasticsearch.action.admin.cluster.node.reload.TransportNodesReloadSecureSettingsAction; import org.elasticsearch.action.admin.cluster.node.shutdown.PrevalidateNodeRemovalAction; import org.elasticsearch.action.admin.cluster.node.shutdown.TransportPrevalidateNodeRemovalAction; +import org.elasticsearch.action.admin.cluster.node.shutdown.TransportPrevalidateShardPathAction; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction; import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction; import 
org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction; @@ -707,6 +708,7 @@ public void reg actions.register(TransportNodesListShardStoreMetadata.TYPE, TransportNodesListShardStoreMetadata.class); actions.register(TransportShardFlushAction.TYPE, TransportShardFlushAction.class); actions.register(TransportShardRefreshAction.TYPE, TransportShardRefreshAction.class); + actions.register(TransportPrevalidateShardPathAction.TYPE, TransportPrevalidateShardPathAction.class); // desired nodes actions.register(GetDesiredNodesAction.INSTANCE, TransportGetDesiredNodesAction.class); @@ -766,7 +768,7 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestResetFeatureStateAction()); registerHandler.accept(new RestGetFeatureUpgradeStatusAction()); registerHandler.accept(new RestPostFeatureUpgradeAction()); - registerHandler.accept(new RestGetIndicesAction(threadPool)); + registerHandler.accept(new RestGetIndicesAction()); registerHandler.accept(new RestIndicesStatsAction()); registerHandler.accept(new RestIndicesSegmentsAction()); registerHandler.accept(new RestIndicesShardStoresAction()); diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java index f2715710ed51..2226d3be323d 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.action; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.WriteResponse; @@ -118,7 +118,7 @@ public DocWriteResponse(ShardId shardId, String id, long seqNo, long primaryTerm protected DocWriteResponse(ShardId shardId, StreamInput in) throws IOException { super(in); this.shardId = shardId; - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { String type = in.readString(); assert MapperService.SINGLE_MAPPING_NAME.equals(type) : "Expected [_doc] but received [" + type + "]"; } @@ -137,7 +137,7 @@ protected DocWriteResponse(ShardId shardId, StreamInput in) throws IOException { protected DocWriteResponse(StreamInput in) throws IOException { super(in); shardId = new ShardId(in); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { String type = in.readString(); assert MapperService.SINGLE_MAPPING_NAME.equals(type) : "Expected [_doc] but received [" + type + "]"; } @@ -266,7 +266,7 @@ public void writeTo(StreamOutput out) throws IOException { } private void writeWithoutShardId(StreamOutput out) throws IOException { - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); diff --git a/server/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java b/server/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java index 03690b632062..29df8ec55c9b 100644 --- a/server/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java +++ b/server/src/main/java/org/elasticsearch/action/NoShardAvailableActionException.java @@ -14,20 +14,36 @@ import org.elasticsearch.rest.RestStatus; import java.io.IOException; 
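The hunks above all make the same migration: version checks on StreamInput/StreamOutput move from the node Version to the new TransportVersion accessors. A minimal sketch of the resulting pattern; the class and field are illustrative only, mirroring the DocWriteResponse/RoutingMissingException changes:

    class ExampleResponse extends ActionResponse {
        private final String id;

        ExampleResponse(StreamInput in) throws IOException {
            super(in);
            if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) {
                in.readString();    // legacy "_doc" type written by pre-8.0 peers, no longer used
            }
            id = in.readString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            // reader and writer must gate the same field on the same TransportVersion constant
            if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) {
                out.writeString(MapperService.SINGLE_MAPPING_NAME);    // older peers still expect it
            }
            out.writeString(id);
        }
    }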
+import java.io.PrintWriter; public class NoShardAvailableActionException extends ElasticsearchException { + private static final StackTraceElement[] EMPTY_STACK_TRACE = new StackTraceElement[0]; + + // This is set so that no StackTrace is serialized in the scenario when we wrap other shard failures. + // It isn't necessary to serialize this field over the wire as the empty stack trace is serialized instead. + private final boolean onShardFailureWrapper; + + public static NoShardAvailableActionException forOnShardFailureWrapper(String msg) { + return new NoShardAvailableActionException(null, msg, null, true); + } + public NoShardAvailableActionException(ShardId shardId) { - this(shardId, null); + this(shardId, null, null, false); } public NoShardAvailableActionException(ShardId shardId, String msg) { - this(shardId, msg, null); + this(shardId, msg, null, false); } public NoShardAvailableActionException(ShardId shardId, String msg, Throwable cause) { + this(shardId, msg, cause, false); + } + + private NoShardAvailableActionException(ShardId shardId, String msg, Throwable cause, boolean onShardFailureWrapper) { super(msg, cause); setShard(shardId); + this.onShardFailureWrapper = onShardFailureWrapper; } @Override @@ -37,5 +53,22 @@ public RestStatus status() { public NoShardAvailableActionException(StreamInput in) throws IOException { super(in); + onShardFailureWrapper = false; + } + + @Override + public StackTraceElement[] getStackTrace() { + return onShardFailureWrapper ? EMPTY_STACK_TRACE : super.getStackTrace(); + } + + @Override + public void printStackTrace(PrintWriter s) { + if (onShardFailureWrapper == false) { + super.printStackTrace(s); + } else { + // Override to simply print the first line of the trace, which is the current exception. + // Since we aren't serializing the repetitive stacktrace onShardFailureWrapper, we shouldn't print it out either + s.println(this); + } } } diff --git a/server/src/main/java/org/elasticsearch/action/NotifyOnceListener.java b/server/src/main/java/org/elasticsearch/action/NotifyOnceListener.java deleted file mode 100644 index 582290f2a434..000000000000 --- a/server/src/main/java/org/elasticsearch/action/NotifyOnceListener.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action; - -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * A listener that ensures that only one of onResponse or onFailure is called. And the method - * the is called is only called once. Subclasses should implement notification logic with - * innerOnResponse and innerOnFailure. 
- */ -public abstract class NotifyOnceListener implements ActionListener { - - private final AtomicBoolean hasBeenCalled = new AtomicBoolean(false); - - protected abstract void innerOnResponse(Response response); - - protected abstract void innerOnFailure(Exception e); - - @Override - public final void onResponse(Response response) { - if (hasBeenCalled.compareAndSet(false, true)) { - innerOnResponse(response); - } - } - - @Override - public final void onFailure(Exception e) { - if (hasBeenCalled.compareAndSet(false, true)) { - innerOnFailure(e); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/action/RoutingMissingException.java b/server/src/main/java/org/elasticsearch/action/RoutingMissingException.java index ab8a3f199230..000ba521b763 100644 --- a/server/src/main/java/org/elasticsearch/action/RoutingMissingException.java +++ b/server/src/main/java/org/elasticsearch/action/RoutingMissingException.java @@ -9,7 +9,7 @@ package org.elasticsearch.action; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.mapper.MapperService; @@ -41,7 +41,7 @@ public RestStatus status() { public RoutingMissingException(StreamInput in) throws IOException { super(in); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { in.readString(); } id = in.readString(); @@ -50,7 +50,7 @@ public RoutingMissingException(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); diff --git a/server/src/main/java/org/elasticsearch/action/SingleResultDeduplicator.java b/server/src/main/java/org/elasticsearch/action/SingleResultDeduplicator.java new file mode 100644 index 000000000000..273c542bc825 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/SingleResultDeduplicator.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action; + +import org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.common.util.concurrent.ThreadContext; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Consumer; + +/** + * + * Wraps an async action that consumes an {@link ActionListener} such that multiple invocations of {@link #execute(ActionListener)} can + * share the result from a single call to the wrapped action. This implementation is similar to {@link ResultDeduplicator} but offers + * stronger guarantees of not seeing a stale result ever. Concretely, every invocation of {@link #execute(ActionListener)} is guaranteed to + * be resolved with a response that has been computed at a time after the call to {@code execute} has been made. 
This allows this class to
+ * be used to deduplicate results from actions that produce results that change over time transparently.
+ *
+ * @param <T> Result type
+ */
+public final class SingleResultDeduplicator<T> {
+
+    private final ThreadContext threadContext;
+
+    /**
+     * List of listeners waiting for the execution after the current in-progress execution. If {@code null} then no execution is in
+     * progress currently, otherwise an execution is in progress and will trigger another execution that will resolve any listeners queued
+     * up here once done.
+     */
+    private List<ActionListener<T>> waitingListeners;
+
+    private final Consumer<ActionListener<T>> executeAction;
+
+    public SingleResultDeduplicator(ThreadContext threadContext, Consumer<ActionListener<T>> executeAction) {
+        this.threadContext = threadContext;
+        this.executeAction = executeAction;
+    }
+
+    /**
+     * Execute the action for the given {@code listener}.
+     * @param listener listener to resolve with execution result
+     */
+    public void execute(ActionListener<T> listener) {
+        synchronized (this) {
+            if (waitingListeners == null) {
+                // no queued up listeners, just execute this one directly without deduplication and instantiate the list so that
+                // subsequent executions will wait
+                waitingListeners = new ArrayList<>();
+            } else {
+                // already running an execution, queue this one up
+                waitingListeners.add(ContextPreservingActionListener.wrapPreservingContext(listener, threadContext));
+                return;
+            }
+        }
+        doExecute(listener);
+    }
+
+    private void doExecute(ActionListener<T> listener) {
+        final ActionListener<T> wrappedListener = ActionListener.runBefore(listener, () -> {
+            final List<ActionListener<T>> listeners;
+            synchronized (this) {
+                if (waitingListeners.isEmpty()) {
+                    // no listeners were queued up while this execution ran, so we just reset the state to not having a running execution
+                    waitingListeners = null;
+                    return;
+                } else {
+                    // we have queued up listeners, so we create a fresh list for the next execution and execute once to handle the
+                    // listeners currently queued up
+                    listeners = waitingListeners;
+                    waitingListeners = new ArrayList<>();
+                }
+            }
+            doExecute(new ActionListener<>() {
+                @Override
+                public void onResponse(T response) {
+                    ActionListener.onResponse(listeners, response);
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    ActionListener.onFailure(listeners, e);
+                }
+            });
+        });
+        ActionListener.run(wrappedListener, executeAction::accept);
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/action/StepListener.java b/server/src/main/java/org/elasticsearch/action/StepListener.java
index dab36040e3e4..e36b799b9290 100644
--- a/server/src/main/java/org/elasticsearch/action/StepListener.java
+++ b/server/src/main/java/org/elasticsearch/action/StepListener.java
@@ -14,6 +14,7 @@
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.BiFunction;
 import java.util.function.Consumer;
@@ -40,7 +41,9 @@
  * }
  */
-public final class StepListener<Response> extends NotifyOnceListener<Response> {
+public final class StepListener<Response> implements ActionListener<Response> {
+
+    private final AtomicBoolean hasBeenCalled = new AtomicBoolean(false);
 
     private final ListenableFuture<Response> delegate;
 
     public StepListener() {
@@ -48,13 +51,17 @@ public StepListener() {
     }
 
     @Override
-    protected void innerOnResponse(Response response) {
-        delegate.onResponse(response);
+    public void onResponse(Response response) {
+        if (hasBeenCalled.compareAndSet(false, true)) {
+            delegate.onResponse(response);
+        }
     }
 
     @Override
-    protected void innerOnFailure(Exception e) {
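// Illustrative usage sketch (not part of this diff) for the SingleResultDeduplicator added above; the thread
// context, the expensive computation and the result handlers are hypothetical placeholders.
SingleResultDeduplicator<String> deduplicator = new SingleResultDeduplicator<>(
    threadPool.getThreadContext(),             // assumed: a ThreadPool is available in the calling component
    l -> l.onResponse(expensiveComputation())  // assumed: some expensive but repeatable computation
);
// Callers that arrive while a computation is running are queued and later resolved by a fresh execution,
// so every listener sees a result computed after its own execute() call.
deduplicator.execute(ActionListener.wrap(result -> handleResult(result), e -> handleFailure(e)));
deduplicator.execute(ActionListener.wrap(result -> handleResult(result), e -> handleFailure(e)));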
- delegate.onFailure(e); + public void onFailure(Exception e) { + if (hasBeenCalled.compareAndSet(false, true)) { + delegate.onFailure(e); + } } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java index dfb5c2ef02af..fd2721241fb9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.allocation; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; @@ -67,7 +67,7 @@ public ClusterAllocationExplanation( } public ClusterAllocationExplanation(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_7_15_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_15_0)) { this.specificShard = in.readBoolean(); } else { this.specificShard = true; // suppress "this is a random shard" warning in BwC situations @@ -81,7 +81,7 @@ public ClusterAllocationExplanation(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_7_15_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_15_0)) { out.writeBoolean(specificShard); } // else suppress "this is a random shard" warning in BwC situations shardRouting.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponse.java index a7695f5512fd..2b481d4cac3b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponse.java @@ -7,36 +7,55 @@ */ package org.elasticsearch.action.admin.cluster.allocation; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.allocation.allocator.ClusterBalanceStats; import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceStats; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; -public class DesiredBalanceResponse extends ActionResponse implements ToXContentObject { +import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.singleChunk; + +public class DesiredBalanceResponse extends ActionResponse implements 
ChunkedToXContentObject { + + private static final TransportVersion CLUSTER_BALANCE_STATS_VERSION = TransportVersion.V_8_7_0; private final DesiredBalanceStats stats; + private final ClusterBalanceStats clusterBalanceStats; private final Map> routingTable; - public DesiredBalanceResponse(DesiredBalanceStats stats, Map> routingTable) { + public DesiredBalanceResponse( + DesiredBalanceStats stats, + ClusterBalanceStats clusterBalanceStats, + Map> routingTable + ) { this.stats = stats; + this.clusterBalanceStats = clusterBalanceStats; this.routingTable = routingTable; } public static DesiredBalanceResponse from(StreamInput in) throws IOException { return new DesiredBalanceResponse( DesiredBalanceStats.readFrom(in), + in.getTransportVersion().onOrAfter(CLUSTER_BALANCE_STATS_VERSION) + ? ClusterBalanceStats.readFrom(in) + : ClusterBalanceStats.EMPTY, in.readImmutableMap(StreamInput::readString, v -> v.readImmutableMap(StreamInput::readVInt, DesiredShards::from)) ); } @@ -44,6 +63,9 @@ public static DesiredBalanceResponse from(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { stats.writeTo(out); + if (out.getTransportVersion().onOrAfter(CLUSTER_BALANCE_STATS_VERSION)) { + clusterBalanceStats.writeTo(out); + } out.writeMap( routingTable, StreamOutput::writeString, @@ -56,32 +78,38 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - { - builder.startObject("stats"); - stats.toXContent(builder, params); - builder.endObject(); - } - { - builder.startObject("routing_table"); - for (Map.Entry> indexEntry : routingTable.entrySet()) { - builder.startObject(indexEntry.getKey()); - for (Map.Entry shardEntry : indexEntry.getValue().entrySet()) { - builder.field(String.valueOf(shardEntry.getKey())); - shardEntry.getValue().toXContent(builder, params); - } - builder.endObject(); + public Iterator toXContentChunked(ToXContent.Params params) { + return Iterators.concat( + singleChunk( + (builder, p) -> builder.startObject(), + (builder, p) -> builder.field("stats", stats), + (builder, p) -> builder.field("cluster_balance_stats", clusterBalanceStats), + (builder, p) -> builder.startObject("routing_table") + ), + routingTableToXContentChunked(), + singleChunk((builder, p) -> builder.endObject(), (builder, p) -> builder.endObject()) + ); + } + + private Iterator routingTableToXContentChunked() { + return routingTable.entrySet().stream().map(indexEntry -> (ToXContent) (builder, p) -> { + builder.startObject(indexEntry.getKey()); + for (Map.Entry shardEntry : indexEntry.getValue().entrySet()) { + builder.field(String.valueOf(shardEntry.getKey())); + shardEntry.getValue().toXContent(builder, p); } - builder.endObject(); - } - return builder.endObject(); + return builder.endObject(); + }).iterator(); } public DesiredBalanceStats getStats() { return stats; } + public ClusterBalanceStats getClusterBalanceStats() { + return clusterBalanceStats; + } + public Map> getRoutingTable() { return routingTable; } @@ -91,17 +119,24 @@ public boolean equals(Object o) { if (this == o) return true; return o instanceof DesiredBalanceResponse that && Objects.equals(stats, that.stats) + && Objects.equals(clusterBalanceStats, that.clusterBalanceStats) && Objects.equals(routingTable, that.routingTable); } @Override public int hashCode() { - return Objects.hash(stats, routingTable); + return Objects.hash(stats, clusterBalanceStats, 
routingTable); } @Override public String toString() { - return "DesiredBalanceResponse{stats=" + stats + ", routingTable=" + routingTable + "}"; + return "DesiredBalanceResponse{stats=" + + stats + + ", clusterBalanceStats=" + + clusterBalanceStats + + ", routingTable=" + + routingTable + + "}"; } public record DesiredShards(List current, ShardAssignmentView desired) implements Writeable, ToXContentObject { @@ -139,21 +174,42 @@ public record ShardView( boolean relocatingNodeIsDesired, int shardId, String index, - AllocationId allocationId + @Nullable Double forecastWriteLoad, + @Nullable Long forecastShardSizeInBytes ) implements Writeable, ToXContentObject { + private static final TransportVersion ADD_FORECASTS_VERSION = TransportVersion.V_8_7_0; + public static ShardView from(StreamInput in) throws IOException { - return new ShardView( - ShardRoutingState.fromValue(in.readByte()), - in.readBoolean(), - in.readOptionalString(), - in.readBoolean(), - in.readOptionalString(), - in.readBoolean(), - in.readVInt(), - in.readString(), - in.readOptionalWriteable(AllocationId::new) - ); + if (in.getTransportVersion().onOrAfter(ADD_FORECASTS_VERSION)) { + return new ShardView( + ShardRoutingState.fromValue(in.readByte()), + in.readBoolean(), + in.readOptionalString(), + in.readBoolean(), + in.readOptionalString(), + in.readBoolean(), + in.readVInt(), + in.readString(), + in.readOptionalDouble(), + in.readOptionalLong() + ); + } else { + var shardView = new ShardView( + ShardRoutingState.fromValue(in.readByte()), + in.readBoolean(), + in.readOptionalString(), + in.readBoolean(), + in.readOptionalString(), + in.readBoolean(), + in.readVInt(), + in.readString(), + null, + null + ); + in.readOptionalWriteable(AllocationId::new); + return shardView; + } } @Override @@ -166,7 +222,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(relocatingNodeIsDesired); out.writeVInt(shardId); out.writeString(index); - out.writeOptionalWriteable(allocationId); + if (out.getTransportVersion().onOrAfter(ADD_FORECASTS_VERSION)) { + out.writeOptionalDouble(forecastWriteLoad); + out.writeOptionalLong(forecastShardSizeInBytes); + } else { + out.writeMissingWriteable(AllocationId.class); + } } @Override @@ -180,6 +241,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws .field("relocating_node_is_desired", relocatingNodeIsDesired) .field("shard_id", shardId) .field("index", index) + .field("forecast_write_load", forecastWriteLoad) + .field("forecast_shard_size_in_bytes", forecastShardSizeInBytes) .endObject(); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceAction.java index e2b40605798e..0ad58e9b987c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetDesiredBalanceAction.java @@ -11,14 +11,17 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; -import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import 
org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster; +import org.elasticsearch.cluster.routing.allocation.allocator.ClusterBalanceStats; import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalance; import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.ShardAssignment; @@ -34,11 +37,15 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.OptionalDouble; +import java.util.OptionalLong; public class TransportGetDesiredBalanceAction extends TransportMasterNodeReadAction { @Nullable private final DesiredBalanceShardsAllocator desiredBalanceShardsAllocator; + private final ClusterInfoService clusterInfoService; + private final WriteLoadForecaster writeLoadForecaster; @Inject public TransportGetDesiredBalanceAction( @@ -47,7 +54,9 @@ public TransportGetDesiredBalanceAction( ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - ShardsAllocator shardsAllocator + ShardsAllocator shardsAllocator, + ClusterInfoService clusterInfoService, + WriteLoadForecaster writeLoadForecaster ) { super( GetDesiredBalanceAction.NAME, @@ -60,9 +69,9 @@ public TransportGetDesiredBalanceAction( DesiredBalanceResponse::from, ThreadPool.Names.MANAGEMENT ); - this.desiredBalanceShardsAllocator = shardsAllocator instanceof DesiredBalanceShardsAllocator - ? (DesiredBalanceShardsAllocator) shardsAllocator - : null; + this.desiredBalanceShardsAllocator = shardsAllocator instanceof DesiredBalanceShardsAllocator allocator ? 
allocator : null; + this.clusterInfoService = clusterInfoService; + this.writeLoadForecaster = writeLoadForecaster; } @Override @@ -72,13 +81,8 @@ protected void masterOperation( ClusterState state, ActionListener listener ) throws Exception { - String allocatorName = ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.get(state.metadata().settings()); - if (allocatorName.equals(ClusterModule.DESIRED_BALANCE_ALLOCATOR) == false || desiredBalanceShardsAllocator == null) { - listener.onFailure( - new ResourceNotFoundException( - "Expected the shard balance allocator to be `desired_balance`, but got `" + allocatorName + "`" - ) - ); + if (desiredBalanceShardsAllocator == null) { + listener.onFailure(new ResourceNotFoundException("Desired balance allocator is not in use, no desired balance found")); return; } @@ -87,15 +91,31 @@ protected void masterOperation( listener.onFailure(new ResourceNotFoundException("Desired balance is not computed yet")); return; } + listener.onResponse( + new DesiredBalanceResponse( + desiredBalanceShardsAllocator.getStats(), + ClusterBalanceStats.createFrom(state, clusterInfoService.getClusterInfo(), writeLoadForecaster), + createRoutingTable(state, latestDesiredBalance) + ) + ); + } + + private Map> createRoutingTable( + ClusterState state, + DesiredBalance latestDesiredBalance + ) { Map> routingTable = new HashMap<>(); for (IndexRoutingTable indexRoutingTable : state.routingTable()) { Map indexDesiredShards = new HashMap<>(); + IndexMetadata indexMetadata = state.metadata().index(indexRoutingTable.getIndex()); for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(shardId); ShardAssignment shardAssignment = latestDesiredBalance.assignments().get(shardRoutingTable.shardId()); List shardViews = new ArrayList<>(); for (int idx = 0; idx < shardRoutingTable.size(); idx++) { ShardRouting shard = shardRoutingTable.shard(idx); + OptionalDouble forecastedWriteLoad = writeLoadForecaster.getForecastedWriteLoad(indexMetadata); + OptionalLong forecastedShardSizeInBytes = indexMetadata.getForecastedShardSizeInBytes(); shardViews.add( new DesiredBalanceResponse.ShardView( shard.state(), @@ -110,7 +130,8 @@ protected void masterOperation( && shardAssignment.nodeIds().contains(shard.relocatingNodeId()), shard.shardId().id(), shard.getIndexName(), - shard.allocationId() + forecastedWriteLoad.isPresent() ? forecastedWriteLoad.getAsDouble() : null, + forecastedShardSizeInBytes.isPresent() ? 
forecastedShardSizeInBytes.getAsLong() : null ) ); } @@ -131,7 +152,7 @@ protected void masterOperation( } routingTable.put(indexRoutingTable.getIndex().getName(), indexDesiredShards); } - listener.onResponse(new DesiredBalanceResponse(desiredBalanceShardsAllocator.getStats(), routingTable)); + return routingTable; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java index 75d512683e31..01bd97bdc5fb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.action.admin.cluster.configuration; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.cluster.ClusterState; @@ -72,7 +72,7 @@ public AddVotingConfigExclusionsRequest(String[] nodeIds, String[] nodeNames, Ti public AddVotingConfigExclusionsRequest(StreamInput in) throws IOException { super(in); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { final String[] legacyNodeDescriptions = in.readStringArray(); if (legacyNodeDescriptions.length > 0) { throw new IllegalArgumentException("legacy [node_name] field was deprecated and must be empty"); @@ -185,7 +185,7 @@ public ActionRequestValidationException validate() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeStringArray(Strings.EMPTY_ARRAY); } out.writeStringArray(nodeIds); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java index b0d7000afa8a..e1fcf41de5eb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java @@ -79,9 +79,9 @@ protected void masterOperation( Task task, UpdateDesiredNodesRequest request, ClusterState state, - ActionListener listener + ActionListener responseListener ) throws Exception { - try { + ActionListener.run(responseListener, listener -> { settingsValidator.validate(request.getNodes()); clusterService.submitStateUpdateTask( "update-desired-nodes", @@ -89,15 +89,13 @@ protected void masterOperation( ClusterStateTaskConfig.build(Priority.URGENT, request.masterNodeTimeout()), taskExecutor ); - } catch (Exception e) { - listener.onFailure(e); - } + }); } @Override protected void doExecute(Task task, UpdateDesiredNodesRequest request, ActionListener listener) { final var minNodeVersion = clusterService.state().nodes().getMinNodeVersion(); - if (request.isCompatibleWithVersion(minNodeVersion) == false) { + if (request.isCompatibleWithVersion(minNodeVersion.transportVersion) == false) { listener.onFailure( new IllegalArgumentException( "Unable to use processor ranges, 
floating-point (with greater precision) processors " diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java index b4d7ffbc3d0d..825db3c31a99 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.desirednodes; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.master.AcknowledgedRequest; @@ -24,7 +24,7 @@ import java.util.Objects; public class UpdateDesiredNodesRequest extends AcknowledgedRequest { - private static final Version DRY_RUN_VERSION = Version.V_8_4_0; + private static final TransportVersion DRY_RUN_VERSION = TransportVersion.V_8_4_0; private final String historyID; private final long version; @@ -58,7 +58,7 @@ public UpdateDesiredNodesRequest(StreamInput in) throws IOException { this.historyID = in.readString(); this.version = in.readLong(); this.nodes = in.readList(DesiredNode::readFrom); - if (in.getVersion().onOrAfter(DRY_RUN_VERSION)) { + if (in.getTransportVersion().onOrAfter(DRY_RUN_VERSION)) { this.dryRun = in.readBoolean(); } else { this.dryRun = false; @@ -71,7 +71,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(historyID); out.writeLong(version); out.writeList(nodes); - if (out.getVersion().onOrAfter(DRY_RUN_VERSION)) { + if (out.getTransportVersion().onOrAfter(DRY_RUN_VERSION)) { out.writeBoolean(dryRun); } } @@ -98,7 +98,7 @@ public boolean isDryRun() { return dryRun; } - public boolean isCompatibleWithVersion(Version version) { + public boolean isCompatibleWithVersion(TransportVersion version) { if (version.onOrAfter(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { return true; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesResponse.java index 5cf4354099ea..0d99cc9d693e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesResponse.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.desirednodes; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -19,7 +19,7 @@ import java.util.Objects; public class UpdateDesiredNodesResponse extends ActionResponse implements ToXContentObject { - private static final Version DRY_RUN_SUPPORTING_VERSION = Version.V_8_4_0; + private static final TransportVersion DRY_RUN_SUPPORTING_VERSION = TransportVersion.V_8_4_0; private final boolean replacedExistingHistoryId; private final boolean dryRun; @@ -36,13 +36,13 @@ public UpdateDesiredNodesResponse(boolean replacedExistingHistoryId, boolean dry public UpdateDesiredNodesResponse(StreamInput in) throws IOException { super(in); 
this.replacedExistingHistoryId = in.readBoolean(); - dryRun = in.getVersion().onOrAfter(DRY_RUN_SUPPORTING_VERSION) ? in.readBoolean() : false; + dryRun = in.getTransportVersion().onOrAfter(DRY_RUN_SUPPORTING_VERSION) ? in.readBoolean() : false; } @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(replacedExistingHistoryId); - if (out.getVersion().onOrAfter(DRY_RUN_SUPPORTING_VERSION)) { + if (out.getTransportVersion().onOrAfter(DRY_RUN_SUPPORTING_VERSION)) { out.writeBoolean(dryRun); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java index 6fa578fbd05b..74ac542214a9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java @@ -335,7 +335,7 @@ public String toString() { + version + '\'' + ", exception='" - + exception.getMessage() + + (exception == null ? "null" : exception.getMessage()) + "'" + '}'; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java index 3d4b690c5bba..e87427c3fdef 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.node.hotthreads; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.support.nodes.BaseNodesRequest; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.StreamInput; @@ -36,7 +36,7 @@ public NodesHotThreadsRequest(StreamInput in) throws IOException { type = HotThreads.ReportType.of(in.readString()); interval = in.readTimeValue(); snapshots = in.readInt(); - if (in.getVersion().onOrAfter(Version.V_7_16_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { sortOrder = HotThreads.SortOrder.of(in.readString()); } } @@ -118,7 +118,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(type.getTypeValue()); out.writeTimeValue(interval); out.writeInt(snapshots); - if (out.getVersion().onOrAfter(Version.V_7_16_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { out.writeString(sortOrder.getOrderValue()); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java index ca9be0a65178..03f66b5e6d2f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.cluster.node.info; import org.elasticsearch.Build; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -72,7 +73,7 @@ public NodeInfo(StreamInput in) throws IOException { addInfoIfNonNull(HttpInfo.class, 
in.readOptionalWriteable(HttpInfo::new)); addInfoIfNonNull(PluginsAndModules.class, in.readOptionalWriteable(PluginsAndModules::new)); addInfoIfNonNull(IngestInfo.class, in.readOptionalWriteable(IngestInfo::new)); - if (in.getVersion().onOrAfter(Version.V_7_10_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { addInfoIfNonNull(AggregationInfo.class, in.readOptionalWriteable(AggregationInfo::new)); } } @@ -193,7 +194,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(getInfo(HttpInfo.class)); out.writeOptionalWriteable(getInfo(PluginsAndModules.class)); out.writeOptionalWriteable(getInfo(IngestInfo.class)); - if (out.getVersion().onOrAfter(Version.V_7_10_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { out.writeOptionalWriteable(getInfo(AggregationInfo.class)); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java index 974e90b11d8d..e6a91b152cad 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.node.info; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.node.ReportingService; @@ -41,7 +41,7 @@ public PluginsAndModules(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_8_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { out.writeList(plugins); } else { out.writeList(plugins.stream().map(PluginRuntimeInfo::descriptor).toList()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index 756e4312784a..be542d375aa4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -8,6 +8,8 @@ package org.elasticsearch.action.admin.cluster.node.reload; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; @@ -37,6 +39,8 @@ public class TransportNodesReloadSecureSettingsAction extends TransportNodesActi NodesReloadSecureSettingsRequest.NodeRequest, NodesReloadSecureSettingsResponse.NodeResponse> { + private static final Logger logger = LogManager.getLogger(TransportNodesReloadSecureSettingsAction.class); + private final Environment environment; private final PluginsService pluginsService; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodePrevalidateShardPathRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodePrevalidateShardPathRequest.java new file mode 100644 index 000000000000..c7964a8b4dfd --- /dev/null +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodePrevalidateShardPathRequest.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.cluster.node.shutdown; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.transport.TransportRequest; + +import java.io.IOException; +import java.util.Collection; +import java.util.Objects; +import java.util.Set; + +/** + * A node-specific request derived from the corresponding {@link PrevalidateShardPathRequest}. +*/ +public class NodePrevalidateShardPathRequest extends TransportRequest { + + private final Set shardIds; + + public NodePrevalidateShardPathRequest(Collection shardIds) { + this.shardIds = Set.copyOf(Objects.requireNonNull(shardIds)); + } + + public NodePrevalidateShardPathRequest(StreamInput in) throws IOException { + super(in); + this.shardIds = Set.copyOf(Objects.requireNonNull(in.readSet(ShardId::new))); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeCollection(shardIds, (o, value) -> value.writeTo(o)); + } + + public Set getShardIds() { + return shardIds; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o instanceof NodePrevalidateShardPathRequest == false) return false; + NodePrevalidateShardPathRequest other = (NodePrevalidateShardPathRequest) o; + return Objects.equals(shardIds, other.shardIds); + } + + @Override + public int hashCode() { + return Objects.hash(shardIds); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodePrevalidateShardPathResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodePrevalidateShardPathResponse.java new file mode 100644 index 000000000000..9b23439f1c07 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodePrevalidateShardPathResponse.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.admin.cluster.node.shutdown; + +import org.elasticsearch.action.support.nodes.BaseNodeResponse; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; + +import java.io.IOException; +import java.util.Objects; +import java.util.Set; + +public class NodePrevalidateShardPathResponse extends BaseNodeResponse { + + private final Set shardIds; + + protected NodePrevalidateShardPathResponse(DiscoveryNode node, Set shardIds) { + super(node); + this.shardIds = Set.copyOf(Objects.requireNonNull(shardIds)); + } + + protected NodePrevalidateShardPathResponse(StreamInput in) throws IOException { + super(in); + shardIds = Set.copyOf(Objects.requireNonNull(in.readSet(ShardId::new))); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeCollection(shardIds); + } + + public Set getShardIds() { + return shardIds; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o instanceof NodePrevalidateShardPathResponse == false) return false; + NodePrevalidateShardPathResponse other = (NodePrevalidateShardPathResponse) o; + return Objects.equals(shardIds, other.shardIds) && Objects.equals(getNode(), other.getNode()); + } + + @Override + public int hashCode() { + return Objects.hash(shardIds, getNode()); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesRemovalPrevalidation.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesRemovalPrevalidation.java index 0115ee973c1f..4f6ea0d5b83d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesRemovalPrevalidation.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesRemovalPrevalidation.java @@ -8,9 +8,11 @@ package org.elasticsearch.action.admin.cluster.node.shutdown; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; @@ -118,15 +120,19 @@ public static NodeResult fromXContent(XContentParser parser) throws IOException } } - // The prevalidation result of a node - public record Result(boolean isSafe, String message) implements ToXContentObject, Writeable { + /** + * The prevalidation result of a node. + * @param reason is nullable only for BWC between 8.6 and 8.7. In a fully-upgraded 8.7, it should always be non-null. 
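// Illustrative sketch (not part of this diff): constructing the extended result; field names follow the
// parser declared below, and the message text is a placeholder.
var unsafe = new NodesRemovalPrevalidation.Result(
    false,
    NodesRemovalPrevalidation.Reason.RED_SHARDS_ON_NODE,
    "node contains copies of the following red shards: [[my-index][0]]"
);
// Rendered to XContent this yields roughly {"is_safe": false, "reason": "red_shards_on_node", "message": "..."};
// when the result was read from a pre-8.7 node, reason is null and the "reason" key is omitted.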
+ */ + public record Result(boolean isSafe, @Nullable Reason reason, String message) implements ToXContentObject, Writeable { private static final ParseField IS_SAFE_FIELD = new ParseField("is_safe"); + private static final ParseField REASON_FIELD = new ParseField("reason"); private static final ParseField MESSAGE_FIELD = new ParseField("message"); private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "nodes_removal_prevalidation_result", - objects -> new Result((boolean) objects[0], (String) objects[1]) + objects -> new Result((boolean) objects[0], Reason.fromString((String) objects[1]), (String) objects[2]) ); static { @@ -135,23 +141,33 @@ public record Result(boolean isSafe, String message) implements ToXContentObject static void configureParser(ConstructingObjectParser parser) { parser.declareBoolean(ConstructingObjectParser.constructorArg(), IS_SAFE_FIELD); + parser.declareString(ConstructingObjectParser.constructorArg(), REASON_FIELD); parser.declareString(ConstructingObjectParser.constructorArg(), MESSAGE_FIELD); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(isSafe); + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + reason.writeTo(out); + } out.writeString(message); } public static Result readFrom(final StreamInput in) throws IOException { - return new Result(in.readBoolean(), in.readString()); + if (in.getTransportVersion().before(TransportVersion.V_8_7_0)) { + return new Result(in.readBoolean(), null, in.readString()); + } + return new Result(in.readBoolean(), Reason.readFrom(in), in.readString()); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(IS_SAFE_FIELD.getPreferredName(), isSafe); + if (reason != null) { + builder.field(REASON_FIELD.getPreferredName(), reason.reason); + } builder.field(MESSAGE_FIELD.getPreferredName(), message); builder.endObject(); return builder; @@ -161,4 +177,40 @@ public static Result fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } } + + public enum Reason implements Writeable { + NO_PROBLEMS("no_problems"), + NO_RED_SHARDS_ON_NODE("no_red_shards_on_node"), + NO_RED_SHARDS_EXCEPT_SEARCHABLE_SNAPSHOTS("no_red_shards_except_searchable_snapshots"), + RED_SHARDS_ON_NODE("red_shards_on_node"), + UNABLE_TO_VERIFY("unable_to_verify_red_shards"); + + private final String reason; + + Reason(String reason) { + this.reason = reason; + } + + public String reason() { + return reason; + } + + public static Reason readFrom(final StreamInput in) throws IOException { + return fromString(in.readString()); + } + + public static Reason fromString(String s) { + for (Reason r : values()) { + if (s.equalsIgnoreCase(r.reason)) { + return r; + } + } + throw new IllegalArgumentException("unexpected Reason value [" + s + "]"); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(reason); + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java index 8f5dd9a0f83f..507309056793 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java @@ -8,11 
+8,13 @@ package org.elasticsearch.action.admin.cluster.node.shutdown; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; import java.util.Arrays; @@ -29,6 +31,7 @@ public class PrevalidateNodeRemovalRequest extends MasterNodeReadRequest { + + private final Set shardIds; + + public PrevalidateShardPathRequest(Set shardIds, String... nodeIds) { + super(nodeIds); + this.shardIds = Set.copyOf(Objects.requireNonNull(shardIds)); + } + + public PrevalidateShardPathRequest(StreamInput in) throws IOException { + super(in); + this.shardIds = Set.copyOf(Objects.requireNonNull(in.readSet(ShardId::new))); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeCollection(shardIds); + } + + public Set getShardIds() { + return shardIds; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o instanceof PrevalidateShardPathRequest == false) return false; + PrevalidateShardPathRequest other = (PrevalidateShardPathRequest) o; + return Objects.equals(shardIds, other.shardIds) + && Arrays.equals(nodesIds(), other.nodesIds()) + && Objects.equals(timeout(), other.timeout()); + } + + @Override + public int hashCode() { + return Objects.hash(shardIds, Arrays.hashCode(nodesIds()), timeout()); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateShardPathResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateShardPathResponse.java new file mode 100644 index 000000000000..2bbaa6eb2827 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateShardPathResponse.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.admin.cluster.node.shutdown; + +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.support.nodes.BaseNodesResponse; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.List; + +public class PrevalidateShardPathResponse extends BaseNodesResponse { + + public PrevalidateShardPathResponse( + ClusterName clusterName, + List nodes, + List failures + ) { + super(clusterName, nodes, failures); + } + + public PrevalidateShardPathResponse(StreamInput in) throws IOException { + super(in); + } + + @Override + protected List readNodesFrom(StreamInput in) throws IOException { + return in.readList(NodePrevalidateShardPathResponse::new); + } + + @Override + protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { + out.writeList(nodes); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalAction.java index 44a12dd54936..f3ce39a0cdbf 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateNodeRemovalAction.java @@ -10,8 +10,10 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -22,12 +24,16 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.core.Strings; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; import java.util.List; @@ -46,13 +52,16 @@ public class TransportPrevalidateNodeRemovalAction extends TransportMasterNodeRe private static final Logger logger = LogManager.getLogger(TransportPrevalidateNodeRemovalAction.class); + private final NodeClient client; + @Inject public TransportPrevalidateNodeRemovalAction( TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver + IndexNameExpressionResolver indexNameExpressionResolver, + NodeClient client ) { super( PrevalidateNodeRemovalAction.NAME, @@ -66,6 +75,7 @@ public TransportPrevalidateNodeRemovalAction( PrevalidateNodeRemovalResponse::new, ThreadPool.Names.SAME ); + this.client = client; } @Override @@ -73,14 +83,12 @@ protected void masterOperation( Task task, PrevalidateNodeRemovalRequest request, 
ClusterState state, - ActionListener listener + ActionListener responseListener ) { - try { - Set discoveryNodes = resolveNodes(request, state.nodes()); - doPrevalidation(discoveryNodes, state, listener); - } catch (Exception e) { - listener.onFailure(e); - } + ActionListener.run(responseListener, listener -> { + Set requestNodes = resolveNodes(request, state.nodes()); + doPrevalidation(request, requestNodes, state, listener); + }); } public static Set resolveNodes(PrevalidateNodeRemovalRequest request, DiscoveryNodes discoveryNodes) { @@ -141,62 +149,157 @@ protected ClusterBlockException checkBlock(PrevalidateNodeRemovalRequest request } private void doPrevalidation( - Set nodes, + PrevalidateNodeRemovalRequest request, + Set requestNodes, ClusterState clusterState, ActionListener listener ) { - assert nodes != null && nodes.isEmpty() == false; + assert requestNodes != null && requestNodes.isEmpty() == false; - logger.debug(() -> "prevalidate node removal for nodes " + nodes); + logger.debug(() -> "prevalidate node removal for nodes " + requestNodes); ClusterStateHealth clusterStateHealth = new ClusterStateHealth(clusterState); Metadata metadata = clusterState.metadata(); - switch (clusterStateHealth.getStatus()) { - case GREEN, YELLOW -> { - List nodesResults = nodes.stream() - .map(dn -> new NodeResult(dn.getName(), dn.getId(), dn.getExternalId(), new Result(true, ""))) - .toList(); - listener.onResponse( - new PrevalidateNodeRemovalResponse(new NodesRemovalPrevalidation(true, "cluster status is not RED", nodesResults)) - ); - } - case RED -> { - Set redIndices = clusterStateHealth.getIndices() - .entrySet() - .stream() - .filter(entry -> entry.getValue().getStatus() == ClusterHealthStatus.RED) - .map(Map.Entry::getKey) - .collect(Collectors.toSet()); - // If all red indices are searchable snapshot indices, it is safe to remove any node. 
- Set redNonSSIndices = redIndices.stream() - .map(metadata::index) - .filter(i -> i.isSearchableSnapshot() == false) - .map(im -> im.getIndex().getName()) - .collect(Collectors.toSet()); - if (redNonSSIndices.isEmpty()) { - List nodeResults = nodes.stream() - .map(dn -> new NodeResult(dn.getName(), dn.getId(), dn.getExternalId(), new Result(true, ""))) - .toList(); - listener.onResponse( - new PrevalidateNodeRemovalResponse( - new NodesRemovalPrevalidation(true, "all red indices are searchable snapshot indices", nodeResults) - ) - ); - } else { - List nodeResults = nodes.stream() - .map( - dn -> new NodeResult( - dn.getName(), - dn.getId(), - dn.getExternalId(), - new Result(false, "node may contain a copy of a red index shard") - ) - ) - .toList(); - listener.onResponse( - new PrevalidateNodeRemovalResponse(new NodesRemovalPrevalidation(false, "cluster health is RED", nodeResults)) - ); + DiscoveryNodes clusterNodes = clusterState.getNodes(); + if (clusterStateHealth.getStatus() == ClusterHealthStatus.GREEN || clusterStateHealth.getStatus() == ClusterHealthStatus.YELLOW) { + List nodesResults = requestNodes.stream() + .map( + dn -> new NodeResult( + dn.getName(), + dn.getId(), + dn.getExternalId(), + new Result(true, NodesRemovalPrevalidation.Reason.NO_PROBLEMS, "") + ) + ) + .toList(); + listener.onResponse( + new PrevalidateNodeRemovalResponse(new NodesRemovalPrevalidation(true, "cluster status is not RED", nodesResults)) + ); + return; + } + // RED cluster state + Set redIndices = clusterStateHealth.getIndices() + .entrySet() + .stream() + .filter(entry -> entry.getValue().getStatus() == ClusterHealthStatus.RED) + .map(Map.Entry::getKey) + .collect(Collectors.toSet()); + // If all red indices are searchable snapshot indices, it is safe to remove any node. 
+ Set redNonSSIndices = redIndices.stream() + .map(metadata::index) + .filter(i -> i.isSearchableSnapshot() == false) + .map(im -> im.getIndex().getName()) + .collect(Collectors.toSet()); + if (redNonSSIndices.isEmpty()) { + List nodeResults = requestNodes.stream() + .map( + dn -> new NodeResult( + dn.getName(), + dn.getId(), + dn.getExternalId(), + new Result(true, NodesRemovalPrevalidation.Reason.NO_RED_SHARDS_EXCEPT_SEARCHABLE_SNAPSHOTS, "") + ) + ) + .toList(); + listener.onResponse( + new PrevalidateNodeRemovalResponse( + new NodesRemovalPrevalidation(true, "all red indices are searchable snapshot indices", nodeResults) + ) + ); + } else { + // Reach out to the nodes to find out whether they contain copies of the red non-searchable-snapshot indices + Set redShards = clusterStateHealth.getIndices() + .entrySet() + .stream() + .filter(indexHealthEntry -> redNonSSIndices.contains(indexHealthEntry.getKey())) + .map(Map.Entry::getValue) // ClusterHealthIndex of red non-searchable-snapshot indices + .flatMap( + redIndexHealth -> redIndexHealth.getShards() + .values() + .stream() + .filter(shardHealth -> shardHealth.getStatus() == ClusterHealthStatus.RED) + .map(redShardHealth -> Tuple.tuple(redIndexHealth.getIndex(), redShardHealth)) + ) // (Index, ClusterShardHealth) of all red shards + .map( + redIndexShardHealthTuple -> new ShardId( + metadata.index(redIndexShardHealthTuple.v1()).getIndex(), + redIndexShardHealthTuple.v2().getShardId() + ) + ) // Convert to ShardId + .collect(Collectors.toSet()); + var nodeIds = requestNodes.stream().map(DiscoveryNode::getId).toList().toArray(new String[0]); + var checkShardsRequest = new PrevalidateShardPathRequest(redShards, nodeIds).timeout(request.timeout()); + client.execute(TransportPrevalidateShardPathAction.TYPE, checkShardsRequest, new ActionListener<>() { + @Override + public void onResponse(PrevalidateShardPathResponse response) { + listener.onResponse(new PrevalidateNodeRemovalResponse(createPrevalidationResult(clusterNodes, response))); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); } + }); + } + } + + private NodesRemovalPrevalidation createPrevalidationResult(DiscoveryNodes nodes, PrevalidateShardPathResponse response) { + List nodeResults = new ArrayList<>(response.getNodes().size() + response.failures().size()); + for (NodePrevalidateShardPathResponse nodeResponse : response.getNodes()) { + Result result; + if (nodeResponse.getShardIds().isEmpty()) { + result = new Result(true, NodesRemovalPrevalidation.Reason.NO_RED_SHARDS_ON_NODE, ""); + } else { + result = new Result( + false, + NodesRemovalPrevalidation.Reason.RED_SHARDS_ON_NODE, + Strings.format("node contains copies of the following red shards: %s", nodeResponse.getShardIds()) + ); } + nodeResults.add( + new NodeResult( + nodeResponse.getNode().getName(), + nodeResponse.getNode().getId(), + nodeResponse.getNode().getExternalId(), + result + ) + ); + } + for (FailedNodeException failedResponse : response.failures()) { + DiscoveryNode node = nodes.get(failedResponse.nodeId()); + nodeResults.add( + new NodeResult( + node.getName(), + node.getId(), + node.getExternalId(), + new Result( + false, + NodesRemovalPrevalidation.Reason.UNABLE_TO_VERIFY, + Strings.format("failed contacting the node: %s", failedResponse.getDetailedMessage()) + ) + ) + ); + } + // determine overall result from the node results. 
+ Set unsafeNodeRemovals = response.getNodes() + .stream() + .filter(r -> r.getShardIds().isEmpty() == false) + .map(r -> r.getNode().getId()) + .collect(Collectors.toSet()); + if (unsafeNodeRemovals.isEmpty() == false) { + return new NodesRemovalPrevalidation( + false, + Strings.format("removal of the following nodes might not be safe: %s", unsafeNodeRemovals), + nodeResults + ); + } + if (response.failures().isEmpty() == false) { + Set unknownNodeRemovals = response.failures().stream().map(FailedNodeException::nodeId).collect(Collectors.toSet()); + return new NodesRemovalPrevalidation( + false, + Strings.format("cannot prevalidate removal of nodes with the following IDs: %s", unknownNodeRemovals), + nodeResults + ); } + return new NodesRemovalPrevalidation(true, "", nodeResults); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateShardPathAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateShardPathAction.java new file mode 100644 index 000000000000..251129795026 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportPrevalidateShardPathAction.java @@ -0,0 +1,126 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.cluster.node.shutdown; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.nodes.TransportNodesAction; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Set; + +/** + * Given a set of shard IDs, checks which of those shards have a matching directory in the local data path. + * This is used by {@link PrevalidateNodeRemovalAction} to find out whether a node may contain some copy + * of a specific shard. The response contains a subset of the request shard IDs which are in the cluster state + * of this node and have a matching shard path on the local data path. 
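As the class comment above describes, the coordinating node uses this transport action to ask each candidate node whether it still holds a directory for any of the red shards. A hedged caller-side sketch, built only from calls that appear elsewhere in this patch (the listener body is illustrative):

    // Send the red shard IDs to the nodes being prevalidated and see which ones report a local shard path.
    var checkShardsRequest = new PrevalidateShardPathRequest(redShards, nodeIds).timeout(request.timeout());
    client.execute(TransportPrevalidateShardPathAction.TYPE, checkShardsRequest, ActionListener.wrap(response -> {
        for (NodePrevalidateShardPathResponse nodeResponse : response.getNodes()) {
            // A node is only safe to remove if it reports no matching shard paths.
            boolean safeToRemove = nodeResponse.getShardIds().isEmpty();
            // ... fold into a NodeResult, as createPrevalidationResult(...) does above
        }
    }, listener::onFailure));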
+ */ +public class TransportPrevalidateShardPathAction extends TransportNodesAction< + PrevalidateShardPathRequest, + PrevalidateShardPathResponse, + NodePrevalidateShardPathRequest, + NodePrevalidateShardPathResponse> { + + public static final String ACTION_NAME = "internal:admin/indices/prevalidate_shard_path"; + public static final ActionType TYPE = new ActionType<>(ACTION_NAME, PrevalidateShardPathResponse::new); + private static final Logger logger = LogManager.getLogger(TransportPrevalidateShardPathAction.class); + + private final TransportService transportService; + private final NodeEnvironment nodeEnv; + private final Settings settings; + + @Inject + public TransportPrevalidateShardPathAction( + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + NodeEnvironment nodeEnv, + Settings settings + ) { + super( + ACTION_NAME, + threadPool, + clusterService, + transportService, + actionFilters, + PrevalidateShardPathRequest::new, + NodePrevalidateShardPathRequest::new, + ThreadPool.Names.MANAGEMENT, + NodePrevalidateShardPathResponse.class + ); + this.transportService = transportService; + this.nodeEnv = nodeEnv; + this.settings = settings; + } + + @Override + protected PrevalidateShardPathResponse newResponse( + PrevalidateShardPathRequest request, + List nodeResponses, + List failures + ) { + return new PrevalidateShardPathResponse(clusterService.getClusterName(), nodeResponses, failures); + } + + @Override + protected NodePrevalidateShardPathRequest newNodeRequest(PrevalidateShardPathRequest request) { + return new NodePrevalidateShardPathRequest(request.getShardIds()); + } + + @Override + protected NodePrevalidateShardPathResponse newNodeResponse(StreamInput in, DiscoveryNode node) throws IOException { + return new NodePrevalidateShardPathResponse(in); + } + + @Override + protected NodePrevalidateShardPathResponse nodeOperation(NodePrevalidateShardPathRequest request, Task task) { + Set localShards = new HashSet<>(); + ShardPath shardPath = null; + // For each shard we only check whether the shard path exists, regardless of whether the content is a valid index or not. + for (ShardId shardId : request.getShardIds()) { + try { + var indexMetadata = clusterService.state().metadata().index(shardId.getIndex()); + String customDataPath = null; + if (indexMetadata != null) { + customDataPath = new IndexSettings(indexMetadata, settings).customDataPath(); + } else { + // The index is not known to this node. This shouldn't happen, but it can be safely ignored for this operation. + logger.warn("node doesn't have metadata for the index [{}]", shardId.getIndex()); + } + shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, customDataPath); + if (shardPath != null) { + localShards.add(shardId); + } + } catch (IOException e) { + final String path = shardPath != null ? 
shardPath.resolveIndex().toString() : ""; + logger.error(() -> String.format(Locale.ROOT, "error loading shard path for shard [%s]", shardId), e); + } + } + return new NodePrevalidateShardPathResponse(transportService.getLocalNode(), localShards); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java index 51ad85f99328..a53ed8dacc36 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java @@ -11,11 +11,9 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -43,17 +41,7 @@ public CancelTasksResponse( super(tasks, taskFailures, nodeFailures); } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return super.toXContent(builder, params); - } - public static CancelTasksResponse fromXContent(XContentParser parser) { return PARSER.apply(parser, null); } - - @Override - public String toString() { - return Strings.toString(this, true, true); - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index 32ca0a59a0b1..cf623c37f721 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.cluster.node.tasks.get; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; @@ -16,16 +17,19 @@ import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.ListenableActionFuture; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.tasks.RemovedTaskListener; import org.elasticsearch.tasks.Task; import 
org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; @@ -39,8 +43,9 @@ import java.io.IOException; +import static java.util.Objects.requireNonNullElse; import static org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskAction.TASKS_ORIGIN; -import static org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction.waitForCompletionTimeout; +import static org.elasticsearch.core.TimeValue.timeValueSeconds; /** * ActionType to get a single task. If the task isn't running then it'll try to request the status from request index. @@ -53,6 +58,9 @@ * */ public class TransportGetTaskAction extends HandledTransportAction { + + private static final TimeValue DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT = timeValueSeconds(30); + private final ThreadPool threadPool; private final ClusterService clusterService; private final TransportService transportService; @@ -130,19 +138,47 @@ void getRunningTaskFromNode(Task thisTask, GetTaskRequest request, ActionListene getFinishedTaskFromIndex(thisTask, request, listener); } else { if (request.getWaitForCompletion()) { - // Shift to the generic thread pool and let it wait for the task to complete so we don't block any important threads. - threadPool.generic().execute(new AbstractRunnable() { - @Override - protected void doRun() { - taskManager.waitForTaskCompletion(runningTask, waitForCompletionTimeout(request.getTimeout())); - waitedForCompletion(thisTask, request, runningTask.taskInfo(clusterService.localNode().getId(), true), listener); + final ListenableActionFuture future = new ListenableActionFuture<>(); + RemovedTaskListener removedTaskListener = task -> { + if (task.equals(runningTask)) { + future.onResponse(null); } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + }; + taskManager.registerRemovedTaskListener(removedTaskListener); + // Check if the task had finished before we registered the listener, so we wouldn't wait + // for an event that would never come + if (taskManager.getTask(request.getTaskId().getId()) == null) { + future.onResponse(null); + } + final ActionListener waitedForCompletionListener = ActionListener.runBefore( + ActionListener.wrap( + v -> waitedForCompletion( + thisTask, + request, + runningTask.taskInfo(clusterService.localNode().getId(), true), + listener + ), + listener::onFailure + ), + () -> taskManager.unregisterRemovedTaskListener(removedTaskListener) + ); + if (future.isDone()) { + // The task has already finished, we can run the completion listener in the same thread + waitedForCompletionListener.onResponse(null); + } else { + future.addListener( + new ContextPreservingActionListener<>( + threadPool.getThreadContext().newRestorableContext(false), + waitedForCompletionListener + ) + ); + var failByTimeout = threadPool.schedule( + () -> future.onFailure(new ElasticsearchTimeoutException("Timed out waiting for completion of task")), + requireNonNullElse(request.getTimeout(), DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT), + ThreadPool.Names.SAME + ); + future.addListener(ActionListener.wrap(failByTimeout::cancel)); + } } else { TaskInfo info = runningTask.taskInfo(clusterService.localNode().getId(), true); listener.onResponse(new GetTaskResponse(new TaskResult(false, info))); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java index 3cd7c36fafc3..5b0194c81283 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.node.tasks.list; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.common.Strings; @@ -40,7 +40,7 @@ public ListTasksRequest(StreamInput in) throws IOException { super(in); detailed = in.readBoolean(); waitForCompletion = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_7_13_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_13_0)) { descriptions = in.readStringArray(); } } @@ -50,7 +50,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeBoolean(detailed); out.writeBoolean(waitForCompletion); - if (out.getVersion().onOrAfter(Version.V_7_13_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_13_0)) { out.writeStringArray(descriptions); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java index 28df33674ee7..695202630b47 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java @@ -16,22 +16,24 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Strings; import org.elasticsearch.common.TriFunction; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.Supplier; import java.util.stream.Collectors; import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; @@ -39,7 +41,7 @@ /** * Returns the list of tasks currently running on the nodes */ -public class ListTasksResponse extends BaseTasksResponse implements ToXContentObject { +public class ListTasksResponse extends BaseTasksResponse { private static final String TASKS = "tasks"; private final List tasks; @@ -142,7 +144,7 @@ private void buildTaskGroups() { topLevelTasks.add(taskGroup); } } - this.groups = Collections.unmodifiableList(topLevelTasks.stream().map(TaskGroup.Builder::build).toList()); + this.groups = topLevelTasks.stream().map(TaskGroup.Builder::build).toList(); } /** @@ -155,82 +157,98 @@ public List getTasks() { /** * Convert this task response to XContent grouping by executing nodes. 
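The removed toXContentGroupedByNode below built the entire per-node object in a single XContentBuilder pass; its replacement, groupedByNode, expresses the same output as a sequence of small ToXContent chunks, so very large task lists can be serialized incrementally. A simplified, hedged sketch of the chunked pattern the new code follows (the method and field names here are illustrative, not part of the patch):

    // One leading chunk, one chunk per task, one trailing chunk. The explicit type witnesses
    // only help inference; they carry no runtime behaviour.
    static ChunkedToXContentObject tasksAsChunks(List<TaskInfo> tasks) {
        return ignored -> Iterators.concat(
            Iterators.<ToXContent>single((builder, params) -> builder.startObject().startArray("tasks")),
            Iterators.flatMap(tasks.iterator(), task -> Iterators.<ToXContent>single((builder, params) -> {
                builder.startObject();
                task.toXContent(builder, params);
                return builder.endObject();
            })),
            Iterators.<ToXContent>single((builder, params) -> builder.endArray().endObject())
        );
    }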
*/ - public XContentBuilder toXContentGroupedByNode(XContentBuilder builder, Params params, DiscoveryNodes discoveryNodes) - throws IOException { - toXContentCommon(builder, params); - builder.startObject("nodes"); - for (Map.Entry> entry : getPerNodeTasks().entrySet()) { - DiscoveryNode node = discoveryNodes.get(entry.getKey()); - builder.startObject(entry.getKey()); - if (node != null) { - // If the node is no longer part of the cluster, oh well, we'll just skip it's useful information. - builder.field("name", node.getName()); - builder.field("transport_address", node.getAddress().toString()); - builder.field("host", node.getHostName()); - builder.field("ip", node.getAddress()); - - builder.startArray("roles"); - for (DiscoveryNodeRole role : node.getRoles()) { - builder.value(role.roleName()); - } - builder.endArray(); + public ChunkedToXContentObject groupedByNode(Supplier nodesInCluster) { + return ignored -> { + final var discoveryNodes = nodesInCluster.get(); + return Iterators.concat(Iterators.single((builder, params) -> { + builder.startObject(); + toXContentCommon(builder, params); + builder.startObject("nodes"); + return builder; + }), Iterators.flatMap(getPerNodeTasks().entrySet().iterator(), entry -> { + DiscoveryNode node = discoveryNodes.get(entry.getKey()); + return Iterators.concat(Iterators.single((builder, params) -> { + builder.startObject(entry.getKey()); + if (node != null) { + // If the node is no longer part of the cluster, oh well, we'll just skip its useful information. + builder.field("name", node.getName()); + builder.field("transport_address", node.getAddress().toString()); + builder.field("host", node.getHostName()); + builder.field("ip", node.getAddress()); + + builder.startArray("roles"); + for (DiscoveryNodeRole role : node.getRoles()) { + builder.value(role.roleName()); + } + builder.endArray(); - if (node.getAttributes().isEmpty() == false) { - builder.startObject("attributes"); - for (Map.Entry attrEntry : node.getAttributes().entrySet()) { - builder.field(attrEntry.getKey(), attrEntry.getValue()); + if (node.getAttributes().isEmpty() == false) { + builder.startObject("attributes"); + for (Map.Entry attrEntry : node.getAttributes().entrySet()) { + builder.field(attrEntry.getKey(), attrEntry.getValue()); + } + builder.endObject(); + } } + builder.startObject(TASKS); + return builder; + }), Iterators.flatMap(entry.getValue().iterator(), task -> Iterators.single((builder, params) -> { + builder.startObject(task.taskId().toString()); + task.toXContent(builder, params); builder.endObject(); - } - } - builder.startObject(TASKS); - for (TaskInfo task : entry.getValue()) { - builder.startObject(task.taskId().toString()); - task.toXContent(builder, params); + return builder; + })), Iterators.single((builder, params) -> { + builder.endObject(); + builder.endObject(); + return builder; + })); + }), Iterators.single((builder, params) -> { builder.endObject(); - } - builder.endObject(); - builder.endObject(); - } - builder.endObject(); - return builder; + builder.endObject(); + return builder; + })); + }; } /** * Convert this response to XContent grouping by parent tasks. 
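Since ListTasksResponse no longer implements ToXContentObject (it now extends BaseTasksResponse only), callers that need a complete JSON document can adapt one of the grouped views back to a plain ToXContent, exactly as the updated toString() further down does for groupedByNone(). A short usage sketch built only from calls visible in this patch (the helper method itself is illustrative):

    // Render the parent-grouped view as pretty-printed, human-readable JSON.
    static String renderGroupedByParent(ListTasksResponse response) {
        return Strings.toString(ChunkedToXContent.wrapAsToXContent(response.groupedByParent()), true, true);
    }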
*/ - public XContentBuilder toXContentGroupedByParents(XContentBuilder builder, Params params) throws IOException { - toXContentCommon(builder, params); - builder.startObject(TASKS); - for (TaskGroup group : getTaskGroups()) { + public ChunkedToXContentObject groupedByParent() { + return ignored -> Iterators.concat(Iterators.single((builder, params) -> { + builder.startObject(); + toXContentCommon(builder, params); + builder.startObject(TASKS); + return builder; + }), getTaskGroups().stream().map(group -> (builder, params) -> { builder.field(group.taskInfo().taskId().toString()); group.toXContent(builder, params); - } - builder.endObject(); - return builder; + return builder; + }).iterator(), Iterators.single((builder, params) -> { + builder.endObject(); + builder.endObject(); + return builder; + })); } /** * Presents a flat list of tasks */ - public XContentBuilder toXContentGroupedByNone(XContentBuilder builder, Params params) throws IOException { - toXContentCommon(builder, params); - builder.startArray(TASKS); - for (TaskInfo taskInfo : getTasks()) { + public ChunkedToXContentObject groupedByNone() { + return ignored -> Iterators.concat(Iterators.single((builder, params) -> { + builder.startObject(); + toXContentCommon(builder, params); + builder.startArray(TASKS); + return builder; + }), getTasks().stream().map(taskInfo -> (builder, params) -> { builder.startObject(); taskInfo.toXContent(builder, params); builder.endObject(); - } - builder.endArray(); - return builder; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - toXContentGroupedByNone(builder, params); - builder.endObject(); - return builder; + return builder; + }).iterator(), Iterators.single((builder, params) -> { + builder.endArray(); + builder.endObject(); + return builder; + })); } public static ListTasksResponse fromXContent(XContentParser parser) { @@ -239,6 +257,7 @@ public static ListTasksResponse fromXContent(XContentParser parser) { @Override public String toString() { - return Strings.toString(this, true, true); + return Strings.toString(ChunkedToXContent.wrapAsToXContent(groupedByNone()), true, true); } + } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java index b81f7e3db3c7..5af717dfbb88 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java @@ -8,22 +8,33 @@ package org.elasticsearch.action.admin.cluster.node.tasks.list; +import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.action.support.ListenableActionFuture; +import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.action.support.tasks.TransportTasksAction; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.AbstractRefCounted; +import 
org.elasticsearch.core.RefCounted; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.tasks.RemovedTaskListener; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; +import static java.util.Objects.requireNonNullElse; import static org.elasticsearch.core.TimeValue.timeValueSeconds; public class TransportListTasksAction extends TransportTasksAction { @@ -66,19 +77,75 @@ protected void taskOperation(Task actionTask, ListTasksRequest request, Task tas } @Override - protected void processTasks(ListTasksRequest request, Consumer operation) { + protected void processTasks(ListTasksRequest request, Consumer operation, ActionListener nodeOperation) { if (request.getWaitForCompletion()) { - long timeoutNanos = waitForCompletionTimeout(request.getTimeout()); - operation = operation.andThen(task -> { - if (task.getAction().startsWith(ListTasksAction.NAME)) { - // It doesn't make sense to wait for List Tasks and it can cause an infinite loop of the task waiting - // for itself or one of its child tasks - return; + final ListenableActionFuture future = new ListenableActionFuture<>(); + final Set removedTasks = Sets.newConcurrentHashSet(); + final Set matchedTasks = Sets.newConcurrentHashSet(); + final RefCounted removalRefs = AbstractRefCounted.of(() -> { + matchedTasks.removeAll(removedTasks); + removedTasks.clear(); + if (matchedTasks.isEmpty()) { + future.onResponse(null); } - taskManager.waitForTaskCompletion(task, timeoutNanos); }); + + final AtomicBoolean collectionComplete = new AtomicBoolean(); + final RemovedTaskListener removedTaskListener = task -> { + if (collectionComplete.get() == false && removalRefs.tryIncRef()) { + removedTasks.add(task); + removalRefs.decRef(); + } else { + matchedTasks.remove(task); + if (matchedTasks.isEmpty()) { + future.onResponse(null); + } + } + }; + taskManager.registerRemovedTaskListener(removedTaskListener); + final ActionListener allMatchedTasksRemovedListener = ActionListener.runBefore( + nodeOperation, + () -> taskManager.unregisterRemovedTaskListener(removedTaskListener) + ); + try { + processTasks(request, task -> { + if (task.getAction().startsWith(ListTasksAction.NAME) == false) { + // It doesn't make sense to wait for List Tasks and it can cause an infinite loop of the task waiting + // for itself or one of its child tasks + matchedTasks.add(task); + } + operation.accept(task); + }); + } catch (Exception e) { + allMatchedTasksRemovedListener.onFailure(e); + return; + } + removalRefs.decRef(); + collectionComplete.set(true); + + if (future.isDone()) { + // No tasks to wait, we can run nodeOperation in the management pool + allMatchedTasksRemovedListener.onResponse(null); + } else { + final var threadPool = clusterService.threadPool(); + future.addListener( + new ThreadedActionListener<>( + threadPool.executor(ThreadPool.Names.MANAGEMENT), + new ContextPreservingActionListener<>( + threadPool.getThreadContext().newRestorableContext(false), + allMatchedTasksRemovedListener + ) + ) + ); + var cancellable = threadPool.schedule( + () -> future.onFailure(new ElasticsearchTimeoutException("Timed out waiting for completion of tasks")), + requireNonNullElse(request.getTimeout(), DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT), + ThreadPool.Names.SAME + ); + 
future.addListener(ActionListener.wrap(cancellable::cancel)); + } + } else { + super.processTasks(request, operation, nodeOperation); } - super.processTasks(request, operation); } - } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java index da8642241f99..4011d5365dd5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -55,7 +56,8 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - repositories.toXContent(builder, new DelegatingMapParams(Map.of(RepositoriesMetadata.HIDE_GENERATIONS_PARAM, "true"), params)); + ChunkedToXContent.wrapAsToXContent(repositories) + .toXContent(builder, new DelegatingMapParams(Map.of(RepositoriesMetadata.HIDE_GENERATIONS_PARAM, "true"), params)); builder.endObject(); return builder; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java index 9b787a26134b..1964e0f468cd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java @@ -76,7 +76,12 @@ protected void masterOperation( ClusterState state, final ActionListener listener ) { - listener.onResponse(new GetRepositoriesResponse(new RepositoriesMetadata(getRepositories(state, request.repositories())))); + RepositoriesResult result = getRepositories(state, request.repositories()); + if (result.hasMissingRepositories()) { + listener.onFailure(new RepositoryMissingException(String.join(", ", result.missing()))); + } else { + listener.onResponse(new GetRepositoriesResponse(new RepositoriesMetadata(result.metadata))); + } } /** @@ -84,13 +89,14 @@ protected void masterOperation( * * @param state Cluster state * @param repoNames Repository names or patterns to get metadata for - * @return list of repository metadata + * @return a result with the repository metadata that were found in the cluster state and the missing repositories */ - public static List getRepositories(ClusterState state, String[] repoNames) { + public static RepositoriesResult getRepositories(ClusterState state, String[] repoNames) { RepositoriesMetadata repositories = state.metadata().custom(RepositoriesMetadata.TYPE, RepositoriesMetadata.EMPTY); if (isMatchAll(repoNames)) { - return repositories.repositories(); + return new RepositoriesResult(repositories.repositories()); } + final List missingRepositories = new ArrayList<>(); final List includePatterns = new ArrayList<>(); final List 
excludePatterns = new ArrayList<>(); boolean seenWildcard = false; @@ -102,7 +108,7 @@ public static List getRepositories(ClusterState state, Strin seenWildcard = true; } else { if (repositories.repository(repositoryOrPattern) == null) { - throw new RepositoryMissingException(repositoryOrPattern); + missingRepositories.add(repositoryOrPattern); } } includePatterns.add(repositoryOrPattern); @@ -119,6 +125,20 @@ public static List getRepositories(ClusterState state, Strin } } } - return List.copyOf(repositoryListBuilder); + return new RepositoriesResult(List.copyOf(repositoryListBuilder), missingRepositories); + } + + /** + * A holder class that consists of the repository metadata and the names of the repositories that were not found in the cluster state. + */ + public record RepositoriesResult(List metadata, List missing) { + + RepositoriesResult(List repositoryMetadata) { + this(repositoryMetadata, List.of()); + } + + boolean hasMissingRepositories() { + return missing.isEmpty() == false; + } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java index 4e4743aea558..ba9ba10a80d0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java @@ -8,25 +8,32 @@ package org.elasticsearch.action.admin.cluster.reroute; -import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.master.IsAcknowledgedSupplier; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.allocation.RoutingExplanations; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.rest.action.search.RestSearchAction; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; +import java.util.Collections; +import java.util.Iterator; import java.util.Objects; +import static org.elasticsearch.action.support.master.AcknowledgedResponse.ACKNOWLEDGED_KEY; + /** * Response returned after a cluster reroute request */ -public class ClusterRerouteResponse extends AcknowledgedResponse implements ToXContentObject { +public class ClusterRerouteResponse extends ActionResponse implements IsAcknowledgedSupplier, ChunkedToXContentObject { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestSearchAction.class); public static final String STATE_FIELD_DEPRECATION_MESSAGE = "The [state] field in the response to the reroute API is deprecated " @@ -37,15 +44,17 @@ public class ClusterRerouteResponse extends AcknowledgedResponse implements ToXC */ private final ClusterState state; private final RoutingExplanations explanations; + private final boolean acknowledged; ClusterRerouteResponse(StreamInput in) throws IOException { super(in); + 
acknowledged = in.readBoolean(); state = ClusterState.readFrom(in, null); explanations = RoutingExplanations.readFrom(in); } ClusterRerouteResponse(boolean acknowledged, ClusterState state, RoutingExplanations explanations) { - super(acknowledged); + this.acknowledged = acknowledged; this.state = state; this.explanations = explanations; } @@ -61,26 +70,45 @@ public RoutingExplanations getExplanations() { return this.explanations; } + @Override + public final boolean isAcknowledged() { + return acknowledged; + } + @Override public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); + out.writeBoolean(acknowledged); state.writeTo(out); RoutingExplanations.writeTo(explanations, out); } + private boolean emitState(ToXContent.Params params) { + return Objects.equals(params.param("metric"), "none") == false; + } + @Override - protected void addCustomFields(XContentBuilder builder, Params params) throws IOException { - if (Objects.equals(params.param("metric"), "none") == false) { - if (builder.getRestApiVersion() != RestApiVersion.V_7) { - deprecationLogger.critical(DeprecationCategory.API, "reroute_cluster_state", STATE_FIELD_DEPRECATION_MESSAGE); - } - builder.startObject("state"); - state.toXContent(builder, params); - builder.endObject(); + public Iterator toXContentChunked(ToXContent.Params outerParams) { + if (emitState(outerParams)) { + deprecationLogger.critical(DeprecationCategory.API, "reroute_cluster_state", STATE_FIELD_DEPRECATION_MESSAGE); } + return toXContentChunkedV7(outerParams); + } - if (params.paramAsBoolean("explain", false)) { - explanations.toXContent(builder, params); - } + @Override + public Iterator toXContentChunkedV7(ToXContent.Params outerParams) { + return Iterators.concat( + Iterators.single((builder, params) -> builder.startObject().field(ACKNOWLEDGED_KEY, isAcknowledged())), + emitState(outerParams) + ? 
ChunkedToXContentHelper.wrapWithObject("state", state.toXContentChunked(outerParams)) + : Collections.emptyIterator(), + Iterators.single((builder, params) -> { + if (params.paramAsBoolean("explain", false)) { + explanations.toXContent(builder, params); + } + + builder.endObject(); + return builder; + }) + ); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java index 15af362fe4a4..76d59660da0b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.settings; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; @@ -37,12 +37,12 @@ public Request() {} public Request(StreamInput in) throws IOException { super(in); - assert in.getVersion().onOrAfter(Version.V_8_3_0); + assert in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0); } @Override public void writeTo(StreamOutput out) throws IOException { - assert out.getVersion().onOrAfter(Version.V_8_3_0); + assert out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0); super.writeTo(out); } @@ -77,7 +77,7 @@ public int hashCode() { public Response(StreamInput in) throws IOException { super(in); - assert in.getVersion().onOrAfter(Version.V_8_3_0); + assert in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0); persistentSettings = Settings.readSettingsFromStream(in); transientSettings = Settings.readSettingsFromStream(in); settings = Settings.readSettingsFromStream(in); @@ -91,7 +91,7 @@ public Response(Settings persistentSettings, Settings transientSettings, Setting @Override public void writeTo(StreamOutput out) throws IOException { - assert out.getVersion().onOrAfter(Version.V_8_3_0); + assert out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0); persistentSettings.writeTo(out); transientSettings.writeTo(out); settings.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index 04acdb7a3fa4..e0085a45cd3c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.cluster.snapshots.create; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -54,7 +54,7 @@ public class CreateSnapshotRequest extends MasterNodeRequest { - private static final Version FEATURE_RESET_ON_MASTER = Version.V_7_14_0; + private static final TransportVersion FEATURE_RESET_ON_MASTER = TransportVersion.V_7_14_0; public static ResetFeatureStateRequest fromStream(StreamInput in) throws IOException { - if 
(in.getVersion().before(FEATURE_RESET_ON_MASTER)) { + if (in.getTransportVersion().before(FEATURE_RESET_ON_MASTER)) { throw new IllegalStateException( "feature reset is not available in a cluster that have nodes with version before " + FEATURE_RESET_ON_MASTER ); @@ -38,7 +38,7 @@ private ResetFeatureStateRequest(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(FEATURE_RESET_ON_MASTER)) { + if (out.getTransportVersion().before(FEATURE_RESET_ON_MASTER)) { throw new IllegalStateException( "feature reset is not available in a cluster that have nodes with version before " + FEATURE_RESET_ON_MASTER ); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/TransportResetFeatureStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/TransportResetFeatureStateAction.java index 6fa73bf2ea14..c0c928e2743d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/TransportResetFeatureStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/TransportResetFeatureStateAction.java @@ -73,11 +73,11 @@ protected void masterOperation( final int features = systemIndices.getFeatures().size(); GroupedActionListener groupedActionListener = new GroupedActionListener<>( + systemIndices.getFeatures().size(), listener.map(responses -> { assert features == responses.size(); return new ResetFeatureStateResponse(new ArrayList<>(responses)); - }), - systemIndices.getFeatures().size() + }) ); for (SystemIndices.Feature feature : systemIndices.getFeatures()) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index 30938d91b29a..35b6abed2c2f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.snapshots.get; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.Strings; @@ -40,19 +40,19 @@ public class GetSnapshotsRequest extends MasterNodeRequest public static final String NO_POLICY_PATTERN = "_none"; public static final boolean DEFAULT_VERBOSE_MODE = true; - public static final Version SLM_POLICY_FILTERING_VERSION = Version.V_7_16_0; + public static final TransportVersion SLM_POLICY_FILTERING_VERSION = TransportVersion.V_7_16_0; - public static final Version FROM_SORT_VALUE_VERSION = Version.V_7_16_0; + public static final TransportVersion FROM_SORT_VALUE_VERSION = TransportVersion.V_7_16_0; - public static final Version MULTIPLE_REPOSITORIES_SUPPORT_ADDED = Version.V_7_14_0; + public static final TransportVersion MULTIPLE_REPOSITORIES_SUPPORT_ADDED = TransportVersion.V_7_14_0; - public static final Version PAGINATED_GET_SNAPSHOTS_VERSION = Version.V_7_14_0; + public static final TransportVersion PAGINATED_GET_SNAPSHOTS_VERSION = TransportVersion.V_7_14_0; - public static final Version NUMERIC_PAGINATION_VERSION = Version.V_7_15_0; + public static final TransportVersion NUMERIC_PAGINATION_VERSION = 
TransportVersion.V_7_15_0; - private static final Version SORT_BY_SHARDS_OR_REPO_VERSION = Version.V_7_16_0; + private static final TransportVersion SORT_BY_SHARDS_OR_REPO_VERSION = TransportVersion.V_7_16_0; - private static final Version INDICES_FLAG_VERSION = Version.V_8_3_0; + private static final TransportVersion INDICES_FLAG_VERSION = TransportVersion.V_8_3_0; public static final int NO_LIMIT = -1; @@ -112,7 +112,7 @@ public GetSnapshotsRequest(String... repositories) { public GetSnapshotsRequest(StreamInput in) throws IOException { super(in); - if (in.getVersion().onOrAfter(MULTIPLE_REPOSITORIES_SUPPORT_ADDED)) { + if (in.getTransportVersion().onOrAfter(MULTIPLE_REPOSITORIES_SUPPORT_ADDED)) { repositories = in.readStringArray(); } else { repositories = new String[] { in.readString() }; @@ -120,21 +120,21 @@ public GetSnapshotsRequest(StreamInput in) throws IOException { snapshots = in.readStringArray(); ignoreUnavailable = in.readBoolean(); verbose = in.readBoolean(); - if (in.getVersion().onOrAfter(PAGINATED_GET_SNAPSHOTS_VERSION)) { + if (in.getTransportVersion().onOrAfter(PAGINATED_GET_SNAPSHOTS_VERSION)) { after = in.readOptionalWriteable(After::new); sort = in.readEnum(SortBy.class); size = in.readVInt(); order = SortOrder.readFromStream(in); - if (in.getVersion().onOrAfter(NUMERIC_PAGINATION_VERSION)) { + if (in.getTransportVersion().onOrAfter(NUMERIC_PAGINATION_VERSION)) { offset = in.readVInt(); } - if (in.getVersion().onOrAfter(SLM_POLICY_FILTERING_VERSION)) { + if (in.getTransportVersion().onOrAfter(SLM_POLICY_FILTERING_VERSION)) { policies = in.readStringArray(); } - if (in.getVersion().onOrAfter(FROM_SORT_VALUE_VERSION)) { + if (in.getTransportVersion().onOrAfter(FROM_SORT_VALUE_VERSION)) { fromSortValue = in.readOptionalString(); } - if (in.getVersion().onOrAfter(INDICES_FLAG_VERSION)) { + if (in.getTransportVersion().onOrAfter(INDICES_FLAG_VERSION)) { includeIndexNames = in.readBoolean(); } } @@ -143,7 +143,7 @@ public GetSnapshotsRequest(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().onOrAfter(MULTIPLE_REPOSITORIES_SUPPORT_ADDED)) { + if (out.getTransportVersion().onOrAfter(MULTIPLE_REPOSITORIES_SUPPORT_ADDED)) { out.writeStringArray(repositories); } else { if (repositories.length != 1) { @@ -158,40 +158,44 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(snapshots); out.writeBoolean(ignoreUnavailable); out.writeBoolean(verbose); - if (out.getVersion().onOrAfter(PAGINATED_GET_SNAPSHOTS_VERSION)) { + if (out.getTransportVersion().onOrAfter(PAGINATED_GET_SNAPSHOTS_VERSION)) { out.writeOptionalWriteable(after); if ((sort == SortBy.SHARDS || sort == SortBy.FAILED_SHARDS || sort == SortBy.REPOSITORY) - && out.getVersion().before(SORT_BY_SHARDS_OR_REPO_VERSION)) { + && out.getTransportVersion().before(SORT_BY_SHARDS_OR_REPO_VERSION)) { throw new IllegalArgumentException( - "can't use sort by shard count or repository name with node version [" + out.getVersion() + "]" + "can't use sort by shard count or repository name in transport version [" + out.getTransportVersion() + "]" ); } out.writeEnum(sort); out.writeVInt(size); order.writeTo(out); - if (out.getVersion().onOrAfter(NUMERIC_PAGINATION_VERSION)) { + if (out.getTransportVersion().onOrAfter(NUMERIC_PAGINATION_VERSION)) { out.writeVInt(offset); } else if (offset != 0) { throw new IllegalArgumentException( - "can't use numeric offset in get snapshots request with node version [" + 
out.getVersion() + "]" + "can't use numeric offset in get snapshots request in transport version [" + out.getTransportVersion() + "]" ); } } else if (sort != SortBy.START_TIME || size != NO_LIMIT || after != null || order != SortOrder.ASC) { - throw new IllegalArgumentException("can't use paginated get snapshots request with node version [" + out.getVersion() + "]"); + throw new IllegalArgumentException( + "can't use paginated get snapshots request in transport version [" + out.getTransportVersion() + "]" + ); } - if (out.getVersion().onOrAfter(SLM_POLICY_FILTERING_VERSION)) { + if (out.getTransportVersion().onOrAfter(SLM_POLICY_FILTERING_VERSION)) { out.writeStringArray(policies); } else if (policies.length > 0) { throw new IllegalArgumentException( - "can't use slm policy filter in snapshots request with node version [" + out.getVersion() + "]" + "can't use slm policy filter in snapshots request in transport version [" + out.getTransportVersion() + "]" ); } - if (out.getVersion().onOrAfter(FROM_SORT_VALUE_VERSION)) { + if (out.getTransportVersion().onOrAfter(FROM_SORT_VALUE_VERSION)) { out.writeOptionalString(fromSortValue); } else if (fromSortValue != null) { - throw new IllegalArgumentException("can't use after-value in snapshot request with node version [" + out.getVersion() + "]"); + throw new IllegalArgumentException( + "can't use after-value in snapshot request in transport version [" + out.getTransportVersion() + "]" + ); } - if (out.getVersion().onOrAfter(INDICES_FLAG_VERSION)) { + if (out.getTransportVersion().onOrAfter(INDICES_FLAG_VERSION)) { out.writeBoolean(includeIndexNames); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java index 026435f3a9b2..91b1740777d8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java @@ -14,7 +14,7 @@ import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.Nullable; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -33,7 +33,7 @@ /** * Get snapshots response */ -public class GetSnapshotsResponse extends ActionResponse implements ChunkedToXContent { +public class GetSnapshotsResponse extends ActionResponse implements ChunkedToXContentObject { private static final int UNKNOWN_COUNT = -1; @@ -93,7 +93,7 @@ public GetSnapshotsResponse( public GetSnapshotsResponse(StreamInput in) throws IOException { this.snapshots = in.readImmutableList(SnapshotInfo::readFrom); - if (in.getVersion().onOrAfter(GetSnapshotsRequest.MULTIPLE_REPOSITORIES_SUPPORT_ADDED)) { + if (in.getTransportVersion().onOrAfter(GetSnapshotsRequest.MULTIPLE_REPOSITORIES_SUPPORT_ADDED)) { final Map failedResponses = in.readMap(StreamInput::readString, StreamInput::readException); this.failures = Collections.unmodifiableMap(failedResponses); this.next = in.readOptionalString(); @@ -101,7 +101,7 @@ public GetSnapshotsResponse(StreamInput in) throws IOException { this.failures = Collections.emptyMap(); this.next = null; } - if 
(in.getVersion().onOrAfter(GetSnapshotsRequest.NUMERIC_PAGINATION_VERSION)) { + if (in.getTransportVersion().onOrAfter(GetSnapshotsRequest.NUMERIC_PAGINATION_VERSION)) { this.total = in.readVInt(); this.remaining = in.readVInt(); } else { @@ -132,7 +132,7 @@ public String next() { } /** - * Returns true if there is a least one failed response. + * Returns true if there is at least one failed response. */ public boolean isFailed() { return failures.isEmpty() == false; @@ -149,7 +149,7 @@ public int remaining() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeList(snapshots); - if (out.getVersion().onOrAfter(GetSnapshotsRequest.MULTIPLE_REPOSITORIES_SUPPORT_ADDED)) { + if (out.getTransportVersion().onOrAfter(GetSnapshotsRequest.MULTIPLE_REPOSITORIES_SUPPORT_ADDED)) { out.writeMap(failures, StreamOutput::writeString, StreamOutput::writeException); out.writeOptionalString(next); } else { @@ -158,14 +158,14 @@ public void writeTo(StreamOutput out) throws IOException { throw failures.values().iterator().next(); } } - if (out.getVersion().onOrAfter(GetSnapshotsRequest.NUMERIC_PAGINATION_VERSION)) { + if (out.getTransportVersion().onOrAfter(GetSnapshotsRequest.NUMERIC_PAGINATION_VERSION)) { out.writeVInt(total); out.writeVInt(remaining); } } @Override - public Iterator toXContentChunked() { + public Iterator toXContentChunked(ToXContent.Params params) { return Iterators.concat(Iterators.single((b, p) -> { b.startObject(); b.startArray("snapshots"); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index 1235afc3b860..33fd38c9f9a4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -14,7 +14,7 @@ import org.elasticsearch.action.StepListener; import org.elasticsearch.action.admin.cluster.repositories.get.TransportGetRepositoriesAction; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.GroupedActionListener; +import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.SnapshotsInProgress; @@ -26,8 +26,8 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.Tuple; import org.elasticsearch.repositories.GetSnapshotInfoContext; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; @@ -53,12 +53,12 @@ import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Objects; +import java.util.Queue; import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiPredicate; import java.util.function.Predicate; import java.util.function.ToLongFunction; -import java.util.stream.Collectors; import java.util.stream.Stream; /** @@ -112,12 +112,7 @@ protected void masterOperation( getMultipleReposSnapshotInfo( request.isSingleRepositoryRequest() == false, state.custom(SnapshotsInProgress.TYPE, 
SnapshotsInProgress.EMPTY), - maybeFilterRepositories( - TransportGetRepositoriesAction.getRepositories(state, request.repositories()), - request.sort(), - request.order(), - request.fromSortValue() - ), + TransportGetRepositoriesAction.getRepositories(state, request.repositories()), request.snapshots(), request.ignoreUnavailable(), request.verbose(), @@ -127,6 +122,7 @@ protected void masterOperation( request.offset(), request.size(), request.order(), + request.fromSortValue(), SnapshotPredicates.fromRequest(request), request.includeIndexNames(), listener @@ -156,7 +152,7 @@ private static List maybeFilterRepositories( private void getMultipleReposSnapshotInfo( boolean isMultiRepoRequest, SnapshotsInProgress snapshotsInProgress, - List repos, + TransportGetRepositoriesAction.RepositoriesResult repositoriesResult, String[] snapshots, boolean ignoreUnavailable, boolean verbose, @@ -166,66 +162,72 @@ private void getMultipleReposSnapshotInfo( int offset, int size, SortOrder order, + String fromSortValue, SnapshotPredicates predicates, boolean indices, ActionListener listener ) { - // short-circuit if there are no repos, because we can not create GroupedActionListener of size 0 - if (repos.isEmpty()) { - listener.onResponse(new GetSnapshotsResponse(Collections.emptyList(), Collections.emptyMap(), null, 0, 0)); - return; - } - final GroupedActionListener, SnapshotsInRepo>> groupedActionListener = - new GroupedActionListener<>(listener.map(responses -> { - assert repos.size() == responses.size(); - final List allSnapshots = responses.stream() - .map(Tuple::v2) - .filter(Objects::nonNull) - .flatMap(snapshotsInRepo -> snapshotsInRepo.snapshotInfos.stream()) - .toList(); - final Map failures = responses.stream() - .map(Tuple::v1) - .filter(Objects::nonNull) - .collect(Collectors.toMap(Tuple::v1, Tuple::v2)); - final SnapshotsInRepo snInfos = sortSnapshots(allSnapshots, sortBy, after, offset, size, order); - final List snapshotInfos = snInfos.snapshotInfos; - final int remaining = snInfos.remaining + responses.stream() - .map(Tuple::v2) - .filter(Objects::nonNull) - .mapToInt(s -> s.remaining) - .sum(); - return new GetSnapshotsResponse( - indices ? snapshotInfos : snapshotInfos.stream().map(SnapshotInfo::withoutIndices).toList(), - failures, - remaining > 0 - ? 
GetSnapshotsRequest.After.from(snapshotInfos.get(snapshotInfos.size() - 1), sortBy).asQueryParam() - : null, - responses.stream().map(Tuple::v2).filter(Objects::nonNull).mapToInt(s -> s.totalCount).sum(), - remaining - ); - }), repos.size()); - - for (final RepositoryMetadata repo : repos) { - final String repoName = repo.name(); - getSingleRepoSnapshotInfo( - snapshotsInProgress, - repoName, - snapshots, - predicates, - ignoreUnavailable, - verbose, - cancellableTask, + // Process the missing repositories + final Map failures = ConcurrentCollections.newConcurrentMap(); + for (String missingRepo : repositoriesResult.missing()) { + failures.put(missingRepo, new RepositoryMissingException(missingRepo)); + } + + final Queue> allSnapshotInfos = ConcurrentCollections.newQueue(); + final var remaining = new AtomicInteger(); + final var totalCount = new AtomicInteger(); + + List repositories = maybeFilterRepositories(repositoriesResult.metadata(), sortBy, order, fromSortValue); + try (var listeners = new RefCountingListener(listener.map(ignored -> { + cancellableTask.ensureNotCancelled(); + final var sortedSnapshotsInRepos = sortSnapshots( + allSnapshotInfos.stream().flatMap(Collection::stream), + totalCount.get(), sortBy, after, - order, - groupedActionListener.delegateResponse((groupedListener, e) -> { - if (isMultiRepoRequest && e instanceof ElasticsearchException) { - groupedListener.onResponse(Tuple.tuple(Tuple.tuple(repoName, (ElasticsearchException) e), null)); - } else { - groupedListener.onFailure(e); - } - }).map(snInfos -> Tuple.tuple(null, snInfos)) + offset, + size, + order + ); + final var snapshotInfos = sortedSnapshotsInRepos.snapshotInfos(); + final int finalRemaining = sortedSnapshotsInRepos.remaining() + remaining.get(); + return new GetSnapshotsResponse( + indices ? snapshotInfos : snapshotInfos.stream().map(SnapshotInfo::withoutIndices).toList(), + failures, + finalRemaining > 0 + ? 
GetSnapshotsRequest.After.from(snapshotInfos.get(snapshotInfos.size() - 1), sortBy).asQueryParam() + : null, + totalCount.get(), + finalRemaining ); + }))) { + for (final RepositoryMetadata repository : repositories) { + final String repoName = repository.name(); + getSingleRepoSnapshotInfo( + snapshotsInProgress, + repoName, + snapshots, + predicates, + ignoreUnavailable, + verbose, + cancellableTask, + sortBy, + after, + order, + listeners.acquire((SnapshotsInRepo snapshotsInRepo) -> { + allSnapshotInfos.add(snapshotsInRepo.snapshotInfos()); + remaining.addAndGet(snapshotsInRepo.remaining()); + totalCount.addAndGet(snapshotsInRepo.totalCount()); + }).delegateResponse((l, e) -> { + if (isMultiRepoRequest && e instanceof ElasticsearchException elasticsearchException) { + failures.put(repoName, elasticsearchException); + l.onResponse(SnapshotsInRepo.EMPTY); + } else { + l.onFailure(e); + } + }) + ); + } } } @@ -582,6 +584,18 @@ private static SnapshotsInRepo sortSnapshots( int offset, int size, SortOrder order + ) { + return sortSnapshots(snapshotInfos.stream(), snapshotInfos.size(), sortBy, after, offset, size, order); + } + + private static SnapshotsInRepo sortSnapshots( + Stream infos, + int totalCount, + GetSnapshotsRequest.SortBy sortBy, + @Nullable GetSnapshotsRequest.After after, + int offset, + int size, + SortOrder order ) { final Comparator comparator = switch (sortBy) { case START_TIME -> BY_START_TIME; @@ -593,8 +607,6 @@ private static SnapshotsInRepo sortSnapshots( case REPOSITORY -> BY_REPOSITORY; }; - Stream infos = snapshotInfos.stream(); - if (after != null) { assert offset == 0 : "can't combine after and offset but saw [" + after + "] and offset [" + offset + "]"; infos = infos.filter(buildAfterPredicate(sortBy, after, order)); @@ -610,7 +622,7 @@ private static SnapshotsInRepo sortSnapshots( final List resultSet = size != GetSnapshotsRequest.NO_LIMIT && size < snapshots.size() ? 
snapshots.subList(0, size) : snapshots; - return new SnapshotsInRepo(resultSet, snapshotInfos.size(), allSnapshots.size() - resultSet.size()); + return new SnapshotsInRepo(resultSet, totalCount, allSnapshots.size() - resultSet.size()); } private static Predicate buildAfterPredicate( @@ -861,6 +873,6 @@ private static Predicate filterByLongOffset(ToLongFunction snapshotInfos, int totalCount, int remaining) { - + private static final SnapshotsInRepo EMPTY = new SnapshotsInRepo(List.of(), 0, 0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/TransportGetShardSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/TransportGetShardSnapshotAction.java index 6cc255956f20..a58e1a778295 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/TransportGetShardSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/TransportGetShardSnapshotAction.java @@ -85,8 +85,8 @@ protected void masterOperation( } GroupedActionListener, RepositoryException>> groupedActionListener = new GroupedActionListener<>( - listener.map(TransportGetShardSnapshotAction::transformToResponse), - repositories.size() + repositories.size(), + listener.map(TransportGetShardSnapshotAction::transformToResponse) ); BlockingQueue repositoriesQueue = new LinkedBlockingQueue<>(repositories); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index a561ac48ed79..259a70fa6ff3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.snapshots.restore; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeRequest; @@ -48,7 +48,7 @@ public class RestoreSnapshotRequest extends MasterNodeRequest toXContentChunked() { + public Iterator toXContentChunked(ToXContent.Params params) { return Iterators.concat(Iterators.single((ToXContent) (b, p) -> { b.startObject() .field(SNAPSHOT, snapshot.getSnapshotId().getName()) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java index 13069119d3b9..a8dd1d54d3a0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java @@ -12,7 +12,7 @@ import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; @@ -22,16 
+22,13 @@ import java.util.Iterator; import java.util.List; import java.util.Objects; -import java.util.Spliterator; -import java.util.Spliterators; -import java.util.stream.StreamSupport; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; /** * Snapshot status response */ -public class SnapshotsStatusResponse extends ActionResponse implements ChunkedToXContent { +public class SnapshotsStatusResponse extends ActionResponse implements ChunkedToXContentObject { private final List snapshots; @@ -89,12 +86,10 @@ public int hashCode() { } @Override - public Iterator toXContentChunked() { - return Iterators.concat( - Iterators.single((ToXContent) (b, p) -> b.startObject().startArray("snapshots")), - snapshots.stream() - .flatMap(s -> StreamSupport.stream(Spliterators.spliteratorUnknownSize(s.toXContentChunked(), Spliterator.ORDERED), false)) - .iterator(), + public Iterator toXContentChunked(ToXContent.Params params) { + return Iterators.concat( + Iterators.single((b, p) -> b.startObject().startArray("snapshots")), + Iterators.flatMap(snapshots.iterator(), s -> s.toXContentChunked(params)), Iterators.single((b, p) -> b.endArray().endObject()) ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 55e98327a0a4..c0d69c0bdcf2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -142,10 +142,9 @@ protected void masterOperation( TransportNodesSnapshotsStatus.TYPE, new TransportNodesSnapshotsStatus.Request(nodesIds.toArray(Strings.EMPTY_ARRAY)).snapshots(snapshots) .timeout(request.masterNodeTimeout()), + // fork to snapshot meta since building the response is expensive for large snapshots new ThreadedActionListener<>( - logger, - threadPool, - ThreadPool.Names.SNAPSHOT_META, // fork to snapshot meta since building the response is expensive for large snapshots + threadPool.executor(ThreadPool.Names.SNAPSHOT_META), ActionListener.wrap( nodeSnapshotStatuses -> buildResponse( snapshotsInProgress, @@ -156,8 +155,7 @@ protected void masterOperation( listener ), listener::onFailure - ), - false + ) ) ); } else { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index 48370ca2199c..c6af59caa7f1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -123,13 +123,11 @@ public void onClusterServiceClose() { @Override public void onTimeout(TimeValue timeout) { - try { - if (cancellableTask.notifyIfCancelled(listener) == false) { - listener.onResponse(new ClusterStateResponse(state.getClusterName(), null, true)); + ActionListener.run(listener, l -> { + if (cancellableTask.notifyIfCancelled(l) == false) { + l.onResponse(new ClusterStateResponse(state.getClusterName(), null, true)); } - } catch (Exception e) { - listener.onFailure(e); - } + }); } }, clusterState -> cancellableTask.isCancelled() || acceptableClusterStateOrFailedPredicate.test(clusterState)); } diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index 8c3634538d0e..f233cf57961c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.stats; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.indices.stats.ShardStats; @@ -38,7 +38,7 @@ public ClusterStatsNodeResponse(StreamInput in) throws IOException { this.nodeInfo = new NodeInfo(in); this.nodeStats = new NodeStats(in); shardsStats = in.readArray(ShardStats::new, ShardStats[]::new); - if (in.getVersion().onOrAfter(Version.V_8_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) { searchUsageStats = new SearchUsageStats(in); } else { searchUsageStats = new SearchUsageStats(); @@ -101,7 +101,7 @@ public void writeTo(StreamOutput out) throws IOException { nodeInfo.writeTo(out); nodeStats.writeTo(out); out.writeArray(shardsStats); - if (out.getVersion().onOrAfter(Version.V_8_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) { searchUsageStats.writeTo(out); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java index f095c5d915cf..9d5df6ef1e4e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.stats; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.nodes.BaseNodesResponse; import org.elasticsearch.cluster.ClusterName; @@ -41,7 +41,7 @@ public ClusterStatsResponse(StreamInput in) throws IOException { MappingStats mappingStats = in.readOptionalWriteable(MappingStats::new); AnalysisStats analysisStats = in.readOptionalWriteable(AnalysisStats::new); VersionStats versionStats = null; - if (in.getVersion().onOrAfter(Version.V_7_11_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) { versionStats = in.readOptionalWriteable(VersionStats::new); } this.clusterUUID = clusterUUID; @@ -105,7 +105,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(clusterUUID); out.writeOptionalWriteable(indicesStats.getMappings()); out.writeOptionalWriteable(indicesStats.getAnalysis()); - if (out.getVersion().onOrAfter(Version.V_7_11_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) { out.writeOptionalWriteable(indicesStats.getVersions()); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java index c509d7a3e88b..bc0ed5e70c59 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.stats; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.cluster.metadata.Metadata; @@ -207,7 +207,7 @@ private static int countOccurrences(String script, Pattern pattern) { } MappingStats(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { totalFieldCount = in.readOptionalVLong(); totalDeduplicatedFieldCount = in.readOptionalVLong(); totalMappingSizeBytes = in.readOptionalVLong(); @@ -222,7 +222,7 @@ private static int countOccurrences(String script, Pattern pattern) { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { out.writeOptionalVLong(totalFieldCount); out.writeOptionalVLong(totalDeduplicatedFieldCount); out.writeOptionalVLong(totalMappingSizeBytes); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java index 04fc4075380a..a8fc72afb146 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java @@ -10,16 +10,17 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.service.PendingClusterTask; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; +import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; import java.util.Iterator; import java.util.List; -public class PendingClusterTasksResponse extends ActionResponse implements Iterable, ToXContentObject { +public class PendingClusterTasksResponse extends ActionResponse implements ChunkedToXContentObject { private final List pendingTasks; @@ -36,23 +37,11 @@ public List pendingTasks() { return pendingTasks; } - /** - * The pending cluster tasks - */ - public List getPendingTasks() { - return pendingTasks(); - } - - @Override - public Iterator iterator() { - return pendingTasks.iterator(); - } - @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("tasks: (").append(pendingTasks.size()).append("):\n"); - for (PendingClusterTask pendingClusterTask : this) { + for (PendingClusterTask pendingClusterTask : pendingTasks) { sb.append(pendingClusterTask.getInsertOrder()) .append("/") .append(pendingClusterTask.getPriority()) @@ -66,10 +55,12 @@ public String toString() { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.startArray(Fields.TASKS); - for (PendingClusterTask pendingClusterTask : this) { + public Iterator toXContentChunked(ToXContent.Params params) { + return Iterators.concat(Iterators.single((builder, p) -> { + 
builder.startObject(); + builder.startArray(Fields.TASKS); + return builder; + }), pendingTasks.stream().map(pendingClusterTask -> (builder, p) -> { builder.startObject(); builder.field(Fields.INSERT_ORDER, pendingClusterTask.getInsertOrder()); builder.field(Fields.PRIORITY, pendingClusterTask.getPriority()); @@ -78,10 +69,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Fields.TIME_IN_QUEUE_MILLIS, pendingClusterTask.getTimeInQueueInMillis()); builder.field(Fields.TIME_IN_QUEUE, pendingClusterTask.getTimeInQueue()); builder.endObject(); - } - builder.endArray(); - builder.endObject(); - return builder; + return builder; + }).iterator(), Iterators.single((builder, p) -> { + builder.endArray(); + builder.endObject(); + return builder; + })); } static final class Fields { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java index 098c0de05877..d594ab670106 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -27,7 +26,6 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.List; /** * Indices clear cache action. @@ -62,20 +60,20 @@ public TransportClearIndicesCacheAction( @Override protected EmptyResult readShardResult(StreamInput in) throws IOException { - return EmptyResult.readEmptyResultFrom(in); + return EmptyResult.INSTANCE; } @Override - protected ClearIndicesCacheResponse newResponse( + protected ResponseFactory getResponseFactory( ClearIndicesCacheRequest request, - int totalShards, - int successfulShards, - int failedShards, - List responses, - List shardFailures, ClusterState clusterState ) { - return new ClearIndicesCacheResponse(totalShards, successfulShards, failedShards, shardFailures); + return (totalShards, successfulShards, failedShards, responses, shardFailures) -> new ClearIndicesCacheResponse( + totalShards, + successfulShards, + failedShards, + shardFailures + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java index 9e3cb34e9a5d..8a46daa45e73 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java @@ -190,7 +190,7 @@ public CreateIndexClusterStateUpdateRequest performReroute(boolean performRerout } /** - * @return The composable index template that matches with the index that will be cretaed by this request. + * @return The composable index template that matches with the index that will be created by this request. 
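
A pattern that recurs across the serialization changes above (MappingStats, ClusterStatsResponse, ClusterStatsNodeResponse, and the index request classes) is gating wire-format fields on the stream's transport version instead of the release Version. The following is a minimal sketch of that pattern, not code from this patch; the ExampleStats class and its field are hypothetical, and only the StreamInput/StreamOutput and TransportVersion calls already visible in the diff are assumed.

import java.io.IOException;

import org.elasticsearch.TransportVersion;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

public class ExampleStats implements Writeable {

    // field added in 8.4; null when received from a node speaking an older transport version
    private final Long newCounter;

    public ExampleStats(Long newCounter) {
        this.newCounter = newCounter;
    }

    public ExampleStats(StreamInput in) throws IOException {
        // gate on the transport version negotiated for this connection, not the node's release version
        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
            newCounter = in.readOptionalVLong();
        } else {
            newCounter = null;
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // only emit the field when the receiver is new enough to read it
        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
            out.writeOptionalVLong(newCounter);
        }
    }
}

Reader and writer must branch on the same TransportVersion constant so that both ends of the connection agree on the wire layout; older peers simply never see the optional field.
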
*/ public ComposableIndexTemplate matchingTemplate() { return matchingTemplate; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index 985115cca377..5e75cc1c10be 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -10,7 +10,7 @@ import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.admin.indices.alias.Alias; @@ -82,7 +82,7 @@ public CreateIndexRequest(StreamInput in) throws IOException { cause = in.readString(); index = in.readString(); settings = readSettingsFromStream(in); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { int size = in.readVInt(); assert size <= 1 : "Expected to read 0 or 1 mappings, but received " + size; if (size == 1) { @@ -100,7 +100,7 @@ public CreateIndexRequest(StreamInput in) throws IOException { aliases.add(new Alias(in)); } waitForActiveShards = ActiveShardCount.readFrom(in); - if (in.getVersion().onOrAfter(Version.V_7_12_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_12_0)) { origin = in.readString(); } } @@ -453,7 +453,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(cause); out.writeString(index); settings.writeTo(out); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { if ("{}".equals(mappings)) { out.writeVInt(0); } else { @@ -466,7 +466,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeCollection(aliases); waitForActiveShards.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_7_12_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_12_0)) { out.writeString(origin); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java index ba47c28c869d..1b5d58491438 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java @@ -20,11 +20,13 @@ import org.apache.lucene.codecs.TermVectorsReader; import org.apache.lucene.codecs.lucene90.Lucene90PostingsFormat; import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.ByteVectorValues; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldInfos; import org.apache.lucene.index.Fields; +import org.apache.lucene.index.FloatVectorValues; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.LeafReaderContext; @@ -38,7 +40,6 @@ import org.apache.lucene.index.TermState; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; -import org.apache.lucene.index.VectorValues; import 
org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; @@ -137,7 +138,7 @@ void analyzeStoredFields(SegmentReader reader, IndexDiskUsageStats stats) throws final int skipMask = 0x1FF; // 511 while (docID < reader.maxDoc()) { cancellationChecker.logEvent(); - storedFieldsReader.visitDocument(docID, visitor); + storedFieldsReader.document(docID, visitor); // As we already estimate the size of stored fields, we can trade off the accuracy for the speed of the estimate. // Here we only visit 1/11 documents instead of all documents. Ideally, we should visit 1 doc then skip 10 docs // to avoid missing many skew documents. But, documents are stored in chunks in compressed format and a chunk can @@ -525,23 +526,47 @@ void analyzeKnnVectors(SegmentReader reader, IndexDiskUsageStats stats) throws I cancellationChecker.checkForCancellation(); directory.resetBytesRead(); if (field.getVectorDimension() > 0) { - iterateDocValues(reader.maxDoc(), () -> vectorReader.getVectorValues(field.name), vectors -> { - cancellationChecker.logEvent(); - vectors.vectorValue(); - }); - - // do a couple of randomized searches to figure out min and max offsets of index file - VectorValues vectorValues = vectorReader.getVectorValues(field.name); - int numDocsToVisit = reader.maxDoc() < 10 ? reader.maxDoc() : 10 * (int) Math.log10(reader.maxDoc()); - int skipFactor = Math.max(reader.maxDoc() / numDocsToVisit, 1); - for (int i = 0; i < reader.maxDoc(); i += skipFactor) { - if ((i = vectorValues.advance(i)) == DocIdSetIterator.NO_MORE_DOCS) { - break; + switch (field.getVectorEncoding()) { + case BYTE -> { + iterateDocValues(reader.maxDoc(), () -> vectorReader.getByteVectorValues(field.name), vectors -> { + cancellationChecker.logEvent(); + vectors.vectorValue(); + }); + + // do a couple of randomized searches to figure out min and max offsets of index file + ByteVectorValues vectorValues = vectorReader.getByteVectorValues(field.name); + int numDocsToVisit = reader.maxDoc() < 10 ? reader.maxDoc() : 10 * (int) Math.log10(reader.maxDoc()); + int skipFactor = Math.max(reader.maxDoc() / numDocsToVisit, 1); + for (int i = 0; i < reader.maxDoc(); i += skipFactor) { + if ((i = vectorValues.advance(i)) == DocIdSetIterator.NO_MORE_DOCS) { + break; + } + cancellationChecker.checkForCancellation(); + vectorReader.search(field.name, vectorValues.vectorValue(), 100, null, Integer.MAX_VALUE); + } + stats.addKnnVectors(field.name, directory.getBytesRead()); + } + case FLOAT32 -> { + iterateDocValues(reader.maxDoc(), () -> vectorReader.getFloatVectorValues(field.name), vectors -> { + cancellationChecker.logEvent(); + vectors.vectorValue(); + }); + + // do a couple of randomized searches to figure out min and max offsets of index file + FloatVectorValues vectorValues = vectorReader.getFloatVectorValues(field.name); + int numDocsToVisit = reader.maxDoc() < 10 ? 
reader.maxDoc() : 10 * (int) Math.log10(reader.maxDoc()); + int skipFactor = Math.max(reader.maxDoc() / numDocsToVisit, 1); + for (int i = 0; i < reader.maxDoc(); i += skipFactor) { + if ((i = vectorValues.advance(i)) == DocIdSetIterator.NO_MORE_DOCS) { + break; + } + cancellationChecker.checkForCancellation(); + vectorReader.search(field.name, vectorValues.vectorValue(), 100, null, Integer.MAX_VALUE); + } + stats.addKnnVectors(field.name, directory.getBytesRead()); } - cancellationChecker.checkForCancellation(); - vectorReader.search(field.name, vectorValues.vectorValue(), 100, null, Integer.MAX_VALUE); } - stats.addKnnVectors(field.name, directory.getBytesRead()); + } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageStats.java index 74b3240bdc2b..7534f6653e47 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.diskusage; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -189,7 +189,7 @@ private PerFieldDiskUsage(StreamInput in) throws IOException { pointsBytes = in.readVLong(); normsBytes = in.readVLong(); termVectorsBytes = in.readVLong(); - if (in.getVersion().onOrAfter(Version.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { knnVectorsBytes = in.readVLong(); } } @@ -202,7 +202,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(pointsBytes); out.writeVLong(normsBytes); out.writeVLong(termVectorsBytes); - if (out.getVersion().onOrAfter(Version.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { out.writeVLong(knnVectorsBytes); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index a828f6e413d7..6fce79e31b91 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.util.List; @@ -46,15 +47,11 @@ public TransportFlushAction( client, actionFilters, indexNameExpressionResolver, - TransportShardFlushAction.TYPE + TransportShardFlushAction.TYPE, + ThreadPool.Names.FLUSH ); } - @Override - protected ReplicationResponse newShardResponse() { - return new ReplicationResponse(); - } - @Override protected ShardFlushRequest newShardRequest(FlushRequest request, ShardId shardId) { return new ShardFlushRequest(request, shardId); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index 32e67e95d193..6dca3285269e 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.flush; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; @@ -103,7 +103,7 @@ private static class PreShardSyncedFlushRequest extends TransportRequest { private PreShardSyncedFlushRequest(StreamInput in) throws IOException { super(in); - assert in.getVersion().before(Version.V_8_0_0) : "received pre_sync request from a new node"; + assert in.getTransportVersion().before(TransportVersion.V_8_0_0) : "received pre_sync request from a new node"; this.shardId = new ShardId(in); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java index af416fa95a86..770b4a64ad24 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.forcemerge; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.broadcast.BroadcastRequest; import org.elasticsearch.common.UUIDs; @@ -48,7 +48,7 @@ public static final class Defaults { */ private boolean shouldStoreResult; - private static final Version FORCE_MERGE_UUID_SIMPLE_VERSION = Version.V_8_0_0; + private static final TransportVersion FORCE_MERGE_UUID_SIMPLE_VERSION = TransportVersion.V_8_0_0; /** * Force merge UUID to store in the live commit data of a shard under @@ -71,7 +71,7 @@ public ForceMergeRequest(StreamInput in) throws IOException { maxNumSegments = in.readInt(); onlyExpungeDeletes = in.readBoolean(); flush = in.readBoolean(); - if (in.getVersion().onOrAfter(FORCE_MERGE_UUID_SIMPLE_VERSION)) { + if (in.getTransportVersion().onOrAfter(FORCE_MERGE_UUID_SIMPLE_VERSION)) { forceMergeUUID = in.readString(); } else { forceMergeUUID = in.readOptionalString(); @@ -167,7 +167,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeInt(maxNumSegments); out.writeBoolean(onlyExpungeDeletes); out.writeBoolean(flush); - if (out.getVersion().onOrAfter(FORCE_MERGE_UUID_SIMPLE_VERSION)) { + if (out.getTransportVersion().onOrAfter(FORCE_MERGE_UUID_SIMPLE_VERSION)) { out.writeString(forceMergeUUID); } else { out.writeOptionalString(forceMergeUUID); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java index cd5713e69bdd..8c97bd29ebea 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.ActionFilters; -import 
org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -30,7 +29,6 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.List; /** * ForceMerge index/indices action. @@ -66,20 +64,17 @@ public TransportForceMergeAction( @Override protected EmptyResult readShardResult(StreamInput in) throws IOException { - return EmptyResult.readEmptyResultFrom(in); + return EmptyResult.INSTANCE; } @Override - protected ForceMergeResponse newResponse( - ForceMergeRequest request, - int totalShards, - int successfulShards, - int failedShards, - List responses, - List shardFailures, - ClusterState clusterState - ) { - return new ForceMergeResponse(totalShards, successfulShards, failedShards, shardFailures); + protected ResponseFactory getResponseFactory(ForceMergeRequest request, ClusterState clusterState) { + return (totalShards, successfulShards, failedShards, responses, shardFailures) -> new ForceMergeResponse( + totalShards, + successfulShards, + failedShards, + shardFailures + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java index 4f8f24ea5b72..a36a7e606866 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java @@ -8,21 +8,23 @@ package org.elasticsearch.action.admin.indices.get; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; import java.util.Arrays; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -33,7 +35,7 @@ /** * A response for a get index action. */ -public class GetIndexResponse extends ActionResponse implements ToXContentObject { +public class GetIndexResponse extends ActionResponse implements ChunkedToXContentObject { private Map mappings = Map.of(); private Map> aliases = Map.of(); @@ -73,7 +75,7 @@ public GetIndexResponse( GetIndexResponse(StreamInput in) throws IOException { super(in); this.indices = in.readStringArray(); - mappings = in.readImmutableOpenMap(StreamInput::readString, in.getVersion().before(Version.V_8_0_0) ? i -> { + mappings = in.readImmutableOpenMap(StreamInput::readString, in.getTransportVersion().before(TransportVersion.V_8_0_0) ? 
i -> { int numMappings = i.readVInt(); assert numMappings == 0 || numMappings == 1 : "Expected 0 or 1 mappings but got " + numMappings; if (numMappings == 1) { @@ -178,59 +180,58 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - { - for (final String index : indices) { + public Iterator toXContentChunked(ToXContent.Params ignored) { + return Iterators.concat( + Iterators.single((builder, params) -> builder.startObject()), + Arrays.stream(indices).map(index -> (builder, params) -> { builder.startObject(index); - { - builder.startObject("aliases"); - List indexAliases = aliases.get(index); - if (indexAliases != null) { - for (final AliasMetadata alias : indexAliases) { - AliasMetadata.Builder.toXContent(alias, builder, params); - } - } - builder.endObject(); - - MappingMetadata indexMappings = mappings.get(index); - if (indexMappings == null) { - builder.startObject("mappings").endObject(); - } else { - if (builder.getRestApiVersion() == RestApiVersion.V_7 - && params.paramAsBoolean(INCLUDE_TYPE_NAME_PARAMETER, DEFAULT_INCLUDE_TYPE_NAME_POLICY)) { - builder.startObject("mappings"); - builder.field(MapperService.SINGLE_MAPPING_NAME, indexMappings.sourceAsMap()); - builder.endObject(); - } else { - builder.field("mappings", indexMappings.sourceAsMap()); - } - } - builder.startObject("settings"); - Settings indexSettings = settings.get(index); - if (indexSettings != null) { - indexSettings.toXContent(builder, params); + builder.startObject("aliases"); + List indexAliases = aliases.get(index); + if (indexAliases != null) { + for (final AliasMetadata alias : indexAliases) { + AliasMetadata.Builder.toXContent(alias, builder, params); } - builder.endObject(); + } + builder.endObject(); - Settings defaultIndexSettings = defaultSettings.get(index); - if (defaultIndexSettings != null && defaultIndexSettings.isEmpty() == false) { - builder.startObject("defaults"); - defaultIndexSettings.toXContent(builder, params); + MappingMetadata indexMappings = mappings.get(index); + if (indexMappings == null) { + builder.startObject("mappings").endObject(); + } else { + if (builder.getRestApiVersion() == RestApiVersion.V_7 + && params.paramAsBoolean(INCLUDE_TYPE_NAME_PARAMETER, DEFAULT_INCLUDE_TYPE_NAME_POLICY)) { + builder.startObject("mappings"); + builder.field(MapperService.SINGLE_MAPPING_NAME, indexMappings.sourceAsMap()); builder.endObject(); + } else { + builder.field("mappings", indexMappings.sourceAsMap()); } + } - String dataStream = dataStreams.get(index); - if (dataStream != null) { - builder.field("data_stream", dataStream); - } + builder.startObject("settings"); + Settings indexSettings = settings.get(index); + if (indexSettings != null) { + indexSettings.toXContent(builder, params); } builder.endObject(); - } - } - builder.endObject(); - return builder; + + Settings defaultIndexSettings = defaultSettings.get(index); + if (defaultIndexSettings != null && defaultIndexSettings.isEmpty() == false) { + builder.startObject("defaults"); + defaultIndexSettings.toXContent(builder, params); + builder.endObject(); + } + + String dataStream = dataStreams.get(index); + if (dataStream != null) { + builder.field("data_stream", dataStream); + } + + return builder.endObject(); + }).iterator(), + Iterators.single((builder, params) -> builder.endObject()) + ); } @Override diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsIndexRequest.java index f8fd09fcadfd..47a17006dc4b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsIndexRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.mapping.get; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.support.IndicesOptions; @@ -28,12 +28,12 @@ public class GetFieldMappingsIndexRequest extends SingleShardRequest { - if (mapIn.getVersion().before(Version.V_8_0_0)) { + if (mapIn.getTransportVersion().before(TransportVersion.V_8_0_0)) { int typesSize = mapIn.readVInt(); assert typesSize == 1 || typesSize == 0 : "Expected 0 or 1 types but got " + typesSize; if (typesSize == 0) { @@ -157,7 +157,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { out.writeMap(mappings, StreamOutput::writeString, (outpt, map) -> { - if (outpt.getVersion().before(Version.V_8_0_0)) { + if (outpt.getTransportVersion().before(TransportVersion.V_8_0_0)) { outpt.writeVInt(1); outpt.writeString(MapperService.SINGLE_MAPPING_NAME); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java index b2360de13238..22688f9f81a6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java @@ -8,14 +8,14 @@ package org.elasticsearch.action.admin.indices.mapping.get; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.xcontent.ParseField; @@ -28,7 +28,7 @@ import static org.elasticsearch.rest.BaseRestHandler.DEFAULT_INCLUDE_TYPE_NAME_POLICY; import static org.elasticsearch.rest.BaseRestHandler.INCLUDE_TYPE_NAME_PARAMETER; -public class GetMappingsResponse extends ActionResponse implements ChunkedToXContent { +public class GetMappingsResponse extends ActionResponse implements ChunkedToXContentObject { private static final ParseField MAPPINGS = new ParseField("mappings"); @@ -40,7 +40,7 @@ public GetMappingsResponse(Map mappings) { GetMappingsResponse(StreamInput in) throws IOException { super(in); - mappings = in.readImmutableMap(StreamInput::readString, in.getVersion().before(Version.V_8_0_0) ? 
i -> { + mappings = in.readImmutableMap(StreamInput::readString, in.getTransportVersion().before(TransportVersion.V_8_0_0) ? i -> { int mappingCount = i.readVInt(); assert mappingCount == 1 || mappingCount == 0 : "Expected 0 or 1 mappings but got " + mappingCount; if (mappingCount == 1) { @@ -67,7 +67,7 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public Iterator toXContentChunked() { + public Iterator toXContentChunked(ToXContent.Params outerParams) { return Iterators.concat( Iterators.single((b, p) -> b.startObject()), getMappings().entrySet().stream().map(indexEntry -> (ToXContent) (builder, params) -> { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index 80a032c4542c..5635b39d0199 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.indices.mapping.put; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -81,7 +81,7 @@ public PutMappingRequest(StreamInput in) throws IOException { super(in); indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { String type = in.readOptionalString(); if (MapperService.SINGLE_MAPPING_NAME.equals(type) == false) { throw new IllegalArgumentException("Expected type [_doc] but received [" + type + "]"); @@ -90,7 +90,7 @@ public PutMappingRequest(StreamInput in) throws IOException { source = in.readString(); concreteIndex = in.readOptionalWriteable(Index::new); origin = in.readOptionalString(); - if (in.getVersion().onOrAfter(Version.V_7_9_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_9_0)) { writeIndexOnly = in.readBoolean(); } } @@ -313,13 +313,13 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArrayNullable(indices); indicesOptions.writeIndicesOptions(out); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeOptionalString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(source); out.writeOptionalWriteable(concreteIndex); out.writeOptionalString(origin); - if (out.getVersion().onOrAfter(Version.V_7_9_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_9_0)) { out.writeBoolean(writeIndexOnly); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java index e53f17b68559..ea04ccf62763 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java @@ -14,7 +14,7 @@ import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; 
-import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.xcontent.ToXContent; @@ -26,7 +26,7 @@ /** * Information regarding the recovery state of indices and their associated shards. */ -public class RecoveryResponse extends BaseBroadcastResponse implements ChunkedToXContent { +public class RecoveryResponse extends BaseBroadcastResponse implements ChunkedToXContentObject { private final Map> shardRecoveryStates; @@ -65,7 +65,7 @@ public Map> shardRecoveryStates() { } @Override - public Iterator toXContentChunked() { + public Iterator toXContentChunked(ToXContent.Params params) { return Iterators.concat( Iterators.single((b, p) -> b.startObject()), shardRecoveryStates.entrySet() diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java index c5d39cdb5c19..e6b1758b1c28 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -71,33 +70,27 @@ protected RecoveryState readShardResult(StreamInput in) throws IOException { } @Override - protected RecoveryResponse newResponse( - RecoveryRequest request, - int totalShards, - int successfulShards, - int failedShards, - List responses, - List shardFailures, - ClusterState clusterState - ) { - Map> shardResponses = new HashMap<>(); - for (RecoveryState recoveryState : responses) { - if (recoveryState == null) { - continue; - } - String indexName = recoveryState.getShardId().getIndexName(); - if (shardResponses.containsKey(indexName) == false) { - shardResponses.put(indexName, new ArrayList<>()); - } - if (request.activeOnly()) { - if (recoveryState.getStage() != RecoveryState.Stage.DONE) { + protected ResponseFactory getResponseFactory(RecoveryRequest request, ClusterState clusterState) { + return (totalShards, successfulShards, failedShards, responses, shardFailures) -> { + Map> shardResponses = new HashMap<>(); + for (RecoveryState recoveryState : responses) { + if (recoveryState == null) { + continue; + } + String indexName = recoveryState.getShardId().getIndexName(); + if (shardResponses.containsKey(indexName) == false) { + shardResponses.put(indexName, new ArrayList<>()); + } + if (request.activeOnly()) { + if (recoveryState.getStage() != RecoveryState.Stage.DONE) { + shardResponses.get(indexName).add(recoveryState); + } + } else { shardResponses.get(indexName).add(recoveryState); } - } else { - shardResponses.get(indexName).add(recoveryState); } - } - return new RecoveryResponse(totalShards, successfulShards, failedShards, shardResponses, shardFailures); + return new RecoveryResponse(totalShards, successfulShards, failedShards, shardResponses, shardFailures); + }; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java index ff9f6640b412..ceb940502da5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java @@ -19,6 +19,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.util.List; @@ -48,15 +49,11 @@ public TransportRefreshAction( client, actionFilters, indexNameExpressionResolver, - TransportShardRefreshAction.TYPE + TransportShardRefreshAction.TYPE, + ThreadPool.Names.REFRESH ); } - @Override - protected ReplicationResponse newShardResponse() { - return new ReplicationResponse(); - } - @Override protected BasicReplicationRequest newShardRequest(RefreshRequest request, ShardId shardId) { BasicReplicationRequest replicationRequest = new BasicReplicationRequest(shardId); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java index 27e185e98a9f..c7e7ab973382 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -9,30 +9,45 @@ package org.elasticsearch.action.admin.indices.refresh; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.replication.BasicReplicationRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportReplicationAction; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.function.Predicate; +import java.util.stream.Collectors; public class TransportShardRefreshAction extends TransportReplicationAction< BasicReplicationRequest, BasicReplicationRequest, ReplicationResponse> { + private static final Logger logger = LogManager.getLogger(TransportShardRefreshAction.class); + public static final String NAME = RefreshAction.NAME + "[s]"; public static final ActionType TYPE = new ActionType<>(NAME, 
ReplicationResponse::new); + public static final String SOURCE_API = "api"; @Inject public TransportShardRefreshAction( @@ -57,6 +72,7 @@ public TransportShardRefreshAction( BasicReplicationRequest::new, ThreadPool.Names.REFRESH ); + new TransportUnpromotableShardRefreshAction(transportService, actionFilters, indicesService); } @Override @@ -70,17 +86,49 @@ protected void shardOperationOnPrimary( IndexShard primary, ActionListener> listener ) { - ActionListener.completeWith(listener, () -> { - primary.refresh("api"); + try (var listeners = new RefCountingListener(listener.map(v -> new PrimaryResult<>(shardRequest, new ReplicationResponse())))) { + var refreshResult = primary.refresh(SOURCE_API); logger.trace("{} refresh request executed on primary", primary.shardId()); - return new PrimaryResult<>(shardRequest, new ReplicationResponse()); - }); + + // Forward the request to all nodes that hold unpromotable replica shards + final ClusterState clusterState = clusterService.state(); + final Task parentTaskId = taskManager.getTask(shardRequest.getParentTask().getId()); + clusterState.routingTable() + .shardRoutingTable(shardRequest.shardId()) + .assignedShards() + .stream() + .filter(Predicate.not(ShardRouting::isPromotableToPrimary)) + .map(ShardRouting::currentNodeId) + .collect(Collectors.toUnmodifiableSet()) + .forEach(nodeId -> { + final DiscoveryNode node = clusterState.nodes().get(nodeId); + UnpromotableShardRefreshRequest request = new UnpromotableShardRefreshRequest( + primary.shardId(), + refreshResult.generation() + ); + logger.trace("forwarding refresh request [{}] to node [{}]", request, node); + transportService.sendChildRequest( + node, + TransportUnpromotableShardRefreshAction.NAME, + request, + parentTaskId, + TransportRequestOptions.EMPTY, + new ActionListenerResponseHandler<>( + listeners.acquire(ignored -> {}), + (in) -> TransportResponse.Empty.INSTANCE, + ThreadPool.Names.REFRESH + ) + ); + }); + } catch (Exception e) { + listener.onFailure(e); + } } @Override protected void shardOperationOnReplica(BasicReplicationRequest request, IndexShard replica, ActionListener listener) { ActionListener.completeWith(listener, () -> { - replica.refresh("api"); + replica.refresh(SOURCE_API); logger.trace("{} refresh request executed on replica", replica.shardId()); return new ReplicaResult(); }); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshAction.java new file mode 100644 index 000000000000..500a53513a60 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportUnpromotableShardRefreshAction.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
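
Both the get-snapshots rework and the shard-refresh change above fan a request out to many targets and use a try-with-resources RefCountingListener to complete the caller's listener exactly once, after every acquired child listener has been resolved. The sketch below shows that idiom in isolation; collectFromShard is an illustrative stand-in for the real async transport calls, not an API from this patch, and the RefCountingListener/ActionListener usage mirrors what the diff itself does.

import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.RefCountingListener;

public class FanOutExample {

    // illustrative stand-in for an async per-shard call; the real code sends transport requests
    static void collectFromShard(int shard, ActionListener<String> listener) {
        listener.onResponse("result-from-shard-" + shard);
    }

    public static void fanOut(List<Integer> shards, ActionListener<List<String>> finalListener) {
        Queue<String> results = new ConcurrentLinkedQueue<>();
        // the wrapped listener fires exactly once, after the try block exits and every
        // acquired child listener has been completed
        try (var refs = new RefCountingListener(finalListener.map(ignored -> List.copyOf(results)))) {
            for (int shard : shards) {
                collectFromShard(shard, refs.acquire((String result) -> results.add(result)));
            }
        }
    }

    public static void main(String[] args) {
        fanOut(
            List.of(0, 1, 2),
            ActionListener.wrap(all -> System.out.println("collected " + all), e -> System.out.println("failed: " + e))
        );
    }
}

A failure on any child fails the whole operation unless it is translated into a response via delegateResponse, which is how the snapshot code above converts per-repository errors into entries in its failures map while letting the remaining repositories proceed.
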
+ */ + +package org.elasticsearch.action.admin.indices.refresh; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +public class TransportUnpromotableShardRefreshAction extends HandledTransportAction { + public static final String NAME = RefreshAction.NAME + "[u]"; + + private final IndicesService indicesService; + + @Inject + public TransportUnpromotableShardRefreshAction( + TransportService transportService, + ActionFilters actionFilters, + IndicesService indicesService + ) { + super(NAME, transportService, actionFilters, UnpromotableShardRefreshRequest::new, ThreadPool.Names.REFRESH); + this.indicesService = indicesService; + } + + @Override + protected void doExecute(Task task, UnpromotableShardRefreshRequest request, ActionListener responseListener) { + ActionListener.run(responseListener, listener -> { + assert request.getSegmentGeneration() != Engine.RefreshResult.UNKNOWN_GENERATION + : "The request segment is " + request.getSegmentGeneration(); + IndexShard shard = indicesService.indexServiceSafe(request.getShardId().getIndex()).getShard(request.getShardId().id()); + shard.waitForSegmentGeneration(request.getSegmentGeneration(), listener.map(l -> ActionResponse.Empty.INSTANCE)); + }); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/UnpromotableShardRefreshRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/UnpromotableShardRefreshRequest.java new file mode 100644 index 000000000000..52ef3917ce72 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/UnpromotableShardRefreshRequest.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.admin.indices.refresh; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; + +import java.io.IOException; + +public class UnpromotableShardRefreshRequest extends ActionRequest { + + private final ShardId shardId; + private final long segmentGeneration; + + public UnpromotableShardRefreshRequest(final ShardId shardId, long segmentGeneration) { + this.shardId = shardId; + this.segmentGeneration = segmentGeneration; + } + + public UnpromotableShardRefreshRequest(StreamInput in) throws IOException { + super(in); + shardId = new ShardId(in); + segmentGeneration = in.readVLong(); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + shardId.writeTo(out); + out.writeVLong(segmentGeneration); + } + + public ShardId getShardId() { + return shardId; + } + + public long getSegmentGeneration() { + return segmentGeneration; + } + + @Override + public String toString() { + return "UnpromotableShardRefreshRequest{" + "shardId=" + shardId + ", segmentGeneration=" + segmentGeneration + '}'; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java index c8c915029574..42f15c073f43 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexAbstraction; -import org.elasticsearch.cluster.metadata.IndexAbstractionResolver; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; @@ -31,6 +30,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.Index; @@ -53,6 +53,7 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; @@ -137,6 +138,7 @@ public boolean allowsRemoteIndices() { @Override public boolean includeDataStreams() { + // request must allow data streams because the index name expression resolver for the action handler assumes it return true; } } @@ -439,7 +441,7 @@ public static class TransportAction extends HandledTransportAction indices = new ArrayList<>(); List aliases = new ArrayList<>(); List dataStreams = new ArrayList<>(); - if (localIndices != null) { - resolveIndices( - localIndices.indices(), - request.indicesOptions, - metadata, - indexAbstractionResolver, - indices, - aliases, - dataStreams, - request.includeDataStreams() - ); - } + resolveIndices(localIndices, clusterState, indexNameExpressionResolver, indices, aliases, dataStreams); if 
(remoteClusterIndices.size() > 0) { final int remoteRequests = remoteClusterIndices.size(); @@ -513,12 +503,36 @@ protected void doExecute(Task task, Request request, final ActionListener indices, + List aliases, + List dataStreams + ) { + if (localIndices == null) { + return; + } + resolveIndices(localIndices.indices(), localIndices.indicesOptions(), clusterState, resolver, indices, aliases, dataStreams); + } + /** * Resolves the specified names and/or wildcard expressions to index abstractions. Returns results in the supplied lists. * * @param names The names and wildcard expressions to resolve * @param indicesOptions Options for expanding wildcards to indices with different states - * @param metadata Cluster metadata + * @param clusterState Cluster state * @param resolver Resolver instance for matching names * @param indices List containing any matching indices * @param aliases List containing any matching aliases @@ -528,22 +542,33 @@ protected void doExecute(Task task, Request request, final ActionListener indices, List aliases, - List dataStreams, - boolean includeDataStreams + List dataStreams ) { - List resolvedIndexAbstractions = resolver.resolveIndexAbstractions(names, indicesOptions, metadata, includeDataStreams); - SortedMap lookup = metadata.getIndicesLookup(); + // redundant check to ensure that we don't resolve the list of empty names to "all" in this context + if (names.length == 0) { + return; + } + // TODO This is a dirty hack around the IndexNameExpressionResolver optimisation for "*" as described in: + // https://github.com/elastic/elasticsearch/issues/92903. + // A standalone "*" expression is resolved slightly differently from a "*" embedded in another expression, eg "idx,*". + // The difference is only slight, and it usually doesn't cause problems (see + // https://github.com/elastic/elasticsearch/issues/92911 for a description of a problem). + // But in the case of the Resolve index API, the difference is observable, because resolving standalone "*" cannot show + // aliases (only indices and datastreams). The Resolve index API needs to show the aliases that match wildcards. 
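// A minimal illustration of the difference described in the comment above (hypothetical cluster
// contents, not part of this change): with an index "idx", a data stream "ds" and an alias "al",
//
//   resolver.resolveExpressions(clusterState, indicesOptions, true, new String[] { "*" })
//       // -> { "idx", "ds" }        (standalone "*": indices and data streams only, no aliases)
//   resolver.resolveExpressions(clusterState, indicesOptions, true, new String[] { "**" })
//       // -> { "idx", "ds", "al" }  (also returns aliases matching the wildcard)
//
// which is why a standalone "*" / "_all" expression is rewritten to "**" just below.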
+ if (names.length == 1 && (Metadata.ALL.equals(names[0]) || Regex.isMatchAllPattern(names[0]))) { + names = new String[] { "**" }; + } + Set resolvedIndexAbstractions = resolver.resolveExpressions(clusterState, indicesOptions, true, names); for (String s : resolvedIndexAbstractions) { - enrichIndexAbstraction(metadata, s, lookup, indices, aliases, dataStreams); + enrichIndexAbstraction(clusterState, s, indices, aliases, dataStreams); } indices.sort(Comparator.comparing(ResolvedIndexAbstraction::getName)); aliases.sort(Comparator.comparing(ResolvedIndexAbstraction::getName)); dataStreams.sort(Comparator.comparing(ResolvedIndexAbstraction::getName)); - } private static void mergeResults( @@ -568,18 +593,17 @@ private static void mergeResults( } private static void enrichIndexAbstraction( - Metadata metadata, + ClusterState clusterState, String indexAbstraction, - SortedMap lookup, List indices, List aliases, List dataStreams ) { - IndexAbstraction ia = lookup.get(indexAbstraction); + IndexAbstraction ia = clusterState.metadata().getIndicesLookup().get(indexAbstraction); if (ia != null) { switch (ia.getType()) { case CONCRETE_INDEX -> { - IndexMetadata writeIndex = metadata.index(ia.getWriteIndex()); + IndexMetadata writeIndex = clusterState.metadata().index(ia.getWriteIndex()); String[] aliasNames = writeIndex.getAliases().keySet().stream().sorted().toArray(String[]::new); List attributes = new ArrayList<>(); attributes.add(writeIndex.getState() == IndexMetadata.State.OPEN ? Attribute.OPEN : Attribute.CLOSED); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java index b61e73bbfa26..ba7d6b03043c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.xcontent.ToXContentFragment; @@ -41,7 +41,7 @@ protected Condition(String name, Type type) { * Checks if this condition is available in a specific version. * This makes sure BWC when introducing a new condition which is not recognized by older versions. 
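// Illustrative note (not part of this change): a condition can override this hook to gate itself on
// the transport version that introduced it, exactly as the Min*/MaxPrimaryShardDocs conditions
// further down this diff do, e.g.
//
//   @Override
//   boolean includedInVersion(TransportVersion version) {
//       return version.onOrAfter(TransportVersion.V_8_4_0);
//   }
//
// RolloverRequest#writeTo then filters conditions with includedInVersion(out.getTransportVersion()),
// so a gated condition is never serialized to a node on an older transport version.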
*/ - boolean includedInVersion(Version version) { + boolean includedInVersion(TransportVersion version) { return true; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardDocsCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardDocsCondition.java index c27b4a7b7e73..678ec96c217c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardDocsCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardDocsCondition.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; @@ -62,7 +62,7 @@ public static MaxPrimaryShardDocsCondition fromXContent(XContentParser parser) t } @Override - boolean includedInVersion(Version version) { - return version.onOrAfter(Version.V_8_2_0); + boolean includedInVersion(TransportVersion version) { + return version.onOrAfter(TransportVersion.V_8_2_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinAgeCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinAgeCondition.java index ddcfadd53dd7..98958d3b015c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinAgeCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinAgeCondition.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.TimeValue; @@ -64,7 +64,7 @@ public static MinAgeCondition fromXContent(XContentParser parser) throws IOExcep } @Override - boolean includedInVersion(Version version) { - return version.onOrAfter(Version.V_8_4_0); + boolean includedInVersion(TransportVersion version) { + return version.onOrAfter(TransportVersion.V_8_4_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinDocsCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinDocsCondition.java index 9a4fffc17018..8c6274cfadb8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinDocsCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinDocsCondition.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; @@ -62,7 +62,7 @@ public static MinDocsCondition fromXContent(XContentParser parser) throws IOExce } @Override - boolean includedInVersion(Version version) { - return version.onOrAfter(Version.V_8_4_0); + boolean includedInVersion(TransportVersion version) { + return version.onOrAfter(TransportVersion.V_8_4_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardDocsCondition.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardDocsCondition.java index e1aee305742f..6aaea57e5b55 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardDocsCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardDocsCondition.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; @@ -62,7 +62,7 @@ public static MinPrimaryShardDocsCondition fromXContent(XContentParser parser) t } @Override - boolean includedInVersion(Version version) { - return version.onOrAfter(Version.V_8_4_0); + boolean includedInVersion(TransportVersion version) { + return version.onOrAfter(TransportVersion.V_8_4_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardSizeCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardSizeCondition.java index 5ec8d26d9672..d7149e2a91be 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardSizeCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardSizeCondition.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.ByteSizeValue; @@ -63,7 +63,7 @@ public static MinPrimaryShardSizeCondition fromXContent(XContentParser parser) t } @Override - boolean includedInVersion(Version version) { - return version.onOrAfter(Version.V_8_4_0); + boolean includedInVersion(TransportVersion version) { + return version.onOrAfter(TransportVersion.V_8_4_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinSizeCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinSizeCondition.java index 82cf3c0daf30..52db7ff90cf2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinSizeCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinSizeCondition.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.rollover; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.ByteSizeValue; @@ -63,7 +63,7 @@ public static MinSizeCondition fromXContent(XContentParser parser) throws IOExce } @Override - boolean includedInVersion(Version version) { - return version.onOrAfter(Version.V_8_4_0); + boolean includedInVersion(TransportVersion version) { + return version.onOrAfter(TransportVersion.V_8_4_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index 9916acbef125..fd773a9e19b5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -212,7 +212,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(newIndexName); out.writeBoolean(dryRun); out.writeCollection( - conditions.values().stream().filter(c -> c.includedInVersion(out.getVersion())).toList(), + conditions.values().stream().filter(c -> c.includedInVersion(out.getTransportVersion())).toList(), StreamOutput::writeNamedWriteable ); createIndexRequest.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java index 0c5abb578880..5377a5af883f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -13,15 +13,13 @@ import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; +import org.elasticsearch.action.support.broadcast.ChunkedBroadcastResponse; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.index.engine.Segment; -import org.elasticsearch.rest.action.RestActions; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -33,7 +31,7 @@ import java.util.Locale; import java.util.Map; -public class IndicesSegmentResponse extends BaseBroadcastResponse implements ChunkedToXContent { +public class IndicesSegmentResponse extends ChunkedBroadcastResponse { private final ShardSegments[] shards; @@ -79,72 +77,72 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public Iterator toXContentChunked() { - return Iterators.concat(Iterators.single(((builder, params) -> { - builder.startObject(); - RestActions.buildBroadcastShardsHeader(builder, params, this); - return builder.startObject(Fields.INDICES); - })), getIndices().values().stream().map(indexSegments -> (ToXContent) (builder, params) -> { - builder.startObject(indexSegments.getIndex()); - - builder.startObject(Fields.SHARDS); - for (IndexShardSegments indexSegment : indexSegments) { - builder.startArray(Integer.toString(indexSegment.shardId().id())); - for (ShardSegments shardSegments : indexSegment) { - builder.startObject(); - - builder.startObject(Fields.ROUTING); - builder.field(Fields.STATE, shardSegments.getShardRouting().state()); - builder.field(Fields.PRIMARY, shardSegments.getShardRouting().primary()); - builder.field(Fields.NODE, shardSegments.getShardRouting().currentNodeId()); - if (shardSegments.getShardRouting().relocatingNodeId() != null) { - builder.field(Fields.RELOCATING_NODE, shardSegments.getShardRouting().relocatingNodeId()); - } - builder.endObject(); - - builder.field(Fields.NUM_COMMITTED_SEGMENTS, shardSegments.getNumberOfCommitted()); - builder.field(Fields.NUM_SEARCH_SEGMENTS, shardSegments.getNumberOfSearch()); - - builder.startObject(Fields.SEGMENTS); - for (Segment segment : shardSegments) { - 
builder.startObject(segment.getName()); - builder.field(Fields.GENERATION, segment.getGeneration()); - builder.field(Fields.NUM_DOCS, segment.getNumDocs()); - builder.field(Fields.DELETED_DOCS, segment.getDeletedDocs()); - builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, segment.getSize()); - if (builder.getRestApiVersion() == RestApiVersion.V_7) { - builder.humanReadableField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, ByteSizeValue.ZERO); - } - builder.field(Fields.COMMITTED, segment.isCommitted()); - builder.field(Fields.SEARCH, segment.isSearch()); - if (segment.getVersion() != null) { - builder.field(Fields.VERSION, segment.getVersion()); - } - if (segment.isCompound() != null) { - builder.field(Fields.COMPOUND, segment.isCompound()); + protected Iterator customXContentChunks(ToXContent.Params params) { + return Iterators.concat( + Iterators.single((builder, p) -> builder.startObject(Fields.INDICES)), + getIndices().values().stream().map(indexSegments -> (ToXContent) (builder, p) -> { + builder.startObject(indexSegments.getIndex()); + + builder.startObject(Fields.SHARDS); + for (IndexShardSegments indexSegment : indexSegments) { + builder.startArray(Integer.toString(indexSegment.shardId().id())); + for (ShardSegments shardSegments : indexSegment) { + builder.startObject(); + + builder.startObject(Fields.ROUTING); + builder.field(Fields.STATE, shardSegments.getShardRouting().state()); + builder.field(Fields.PRIMARY, shardSegments.getShardRouting().primary()); + builder.field(Fields.NODE, shardSegments.getShardRouting().currentNodeId()); + if (shardSegments.getShardRouting().relocatingNodeId() != null) { + builder.field(Fields.RELOCATING_NODE, shardSegments.getShardRouting().relocatingNodeId()); } - if (segment.getMergeId() != null) { - builder.field(Fields.MERGE_ID, segment.getMergeId()); - } - if (segment.getSegmentSort() != null) { - toXContent(builder, segment.getSegmentSort()); - } - if (segment.attributes != null && segment.attributes.isEmpty() == false) { - builder.field("attributes", segment.attributes); + builder.endObject(); + + builder.field(Fields.NUM_COMMITTED_SEGMENTS, shardSegments.getNumberOfCommitted()); + builder.field(Fields.NUM_SEARCH_SEGMENTS, shardSegments.getNumberOfSearch()); + + builder.startObject(Fields.SEGMENTS); + for (Segment segment : shardSegments) { + builder.startObject(segment.getName()); + builder.field(Fields.GENERATION, segment.getGeneration()); + builder.field(Fields.NUM_DOCS, segment.getNumDocs()); + builder.field(Fields.DELETED_DOCS, segment.getDeletedDocs()); + builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, segment.getSize()); + if (builder.getRestApiVersion() == RestApiVersion.V_7) { + builder.humanReadableField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, ByteSizeValue.ZERO); + } + builder.field(Fields.COMMITTED, segment.isCommitted()); + builder.field(Fields.SEARCH, segment.isSearch()); + if (segment.getVersion() != null) { + builder.field(Fields.VERSION, segment.getVersion()); + } + if (segment.isCompound() != null) { + builder.field(Fields.COMPOUND, segment.isCompound()); + } + if (segment.getMergeId() != null) { + builder.field(Fields.MERGE_ID, segment.getMergeId()); + } + if (segment.getSegmentSort() != null) { + toXContent(builder, segment.getSegmentSort()); + } + if (segment.attributes != null && segment.attributes.isEmpty() == false) { + builder.field("attributes", segment.attributes); + } + builder.endObject(); } builder.endObject(); - } - builder.endObject(); - builder.endObject(); + builder.endObject(); + } + 
builder.endArray(); } - builder.endArray(); - } - builder.endObject(); + builder.endObject(); - builder.endObject(); - return builder; - }).iterator(), Iterators.single((builder, params) -> builder.endObject().endObject())); + builder.endObject(); + return builder; + }).iterator(), + Iterators.single((builder, p) -> builder.endObject()) + ); } private static void toXContent(XContentBuilder builder, Sort sort) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequest.java index 12ac41284588..204474654bb0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.segments; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.support.broadcast.BroadcastRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -28,7 +28,7 @@ public IndicesSegmentsRequest() { public IndicesSegmentsRequest(StreamInput in) throws IOException { super(in); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { in.readBoolean(); // old 'verbose' option, since removed } } @@ -40,7 +40,7 @@ public IndicesSegmentsRequest(String... indices) { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeBoolean(false); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java index 470aedb2895f..0b035af943b5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -30,7 +29,6 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.List; public class TransportIndicesSegmentsAction extends TransportBroadcastByNodeAction< IndicesSegmentsRequest, @@ -83,17 +81,12 @@ protected ShardSegments readShardResult(StreamInput in) throws IOException { } @Override - protected IndicesSegmentResponse newResponse( + protected ResponseFactory getResponseFactory( IndicesSegmentsRequest request, - int totalShards, - int successfulShards, - int failedShards, - List results, - List shardFailures, ClusterState clusterState ) { - return new IndicesSegmentResponse( - results.toArray(new ShardSegments[results.size()]), + return (totalShards, successfulShards, failedShards, results, shardFailures) -> new IndicesSegmentResponse( + results.toArray(new ShardSegments[0]), totalShards, 
successfulShards, failedShards, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java index ad4c49c29a0e..fa95a93183c5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java @@ -14,7 +14,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -28,7 +28,7 @@ import java.util.Map; import java.util.Objects; -public class GetSettingsResponse extends ActionResponse implements ChunkedToXContent { +public class GetSettingsResponse extends ActionResponse implements ChunkedToXContentObject { private final Map indexToSettings; private final Map indexToDefaultSettings; @@ -154,7 +154,7 @@ public String toString() { } @Override - public Iterator toXContentChunked() { + public Iterator toXContentChunked(ToXContent.Params params) { final boolean omitEmptySettings = indexToDefaultSettings.isEmpty(); return toXContentChunked(omitEmptySettings); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java index b8353d5bc970..f52c659ea55f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java @@ -11,6 +11,8 @@ import org.elasticsearch.cluster.ack.IndicesClusterStateUpdateRequest; import org.elasticsearch.common.settings.Settings; +import java.util.Arrays; + /** * Cluster state update request that allows to update settings for some indices */ @@ -51,4 +53,9 @@ public UpdateSettingsClusterStateUpdateRequest settings(Settings settings) { this.settings = settings; return this; } + + @Override + public String toString() { + return Arrays.toString(indices()) + settings; + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index 6e443744b883..ad9c4904ecc8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.indices.settings.put; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -54,7 +54,7 @@ public UpdateSettingsRequest(StreamInput in) throws IOException { indicesOptions = 
IndicesOptions.readIndicesOptions(in); settings = readSettingsFromStream(in); preserveExisting = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_7_12_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_12_0)) { origin = in.readString(); } } @@ -183,7 +183,7 @@ public void writeTo(StreamOutput out) throws IOException { indicesOptions.writeIndicesOptions(out); settings.writeTo(out); out.writeBoolean(preserveExisting); - if (out.getVersion().onOrAfter(Version.V_7_12_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_12_0)) { out.writeString(origin); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java index 8169860fd803..9547e5bd8f78 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -19,7 +19,6 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Collections; import java.util.List; import java.util.Map; @@ -238,10 +237,6 @@ public IndicesShardStoresResponse(Map>> s this.failures = failures; } - IndicesShardStoresResponse() { - this(Map.of(), Collections.emptyList()); - } - public IndicesShardStoresResponse(StreamInput in) throws IOException { super(in); storeStatuses = in.readImmutableMap( diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index 29044fbe0af1..37cbdb9b0dab 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse.StoreStatus; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse.StoreStatus.AllocationStatus; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; import org.elasticsearch.action.support.nodes.BaseNodesResponse; import org.elasticsearch.client.internal.node.NodeClient; @@ -35,10 +36,9 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Tuple; import org.elasticsearch.gateway.AsyncShardFetch; -import org.elasticsearch.gateway.AsyncShardFetch.Lister; import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards; import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards; import org.elasticsearch.index.shard.ShardId; @@ -140,7 +140,7 @@ private class AsyncShardStoresInfoFetches { private final RoutingNodes routingNodes; private final Set> shards; private final ActionListener listener; - private CountDown expectedOps; + private final RefCountingRunnable refs = new RefCountingRunnable(this::finish); private final Queue fetchResponses; AsyncShardStoresInfoFetches( 
@@ -154,19 +154,15 @@ private class AsyncShardStoresInfoFetches { this.shards = shards; this.listener = listener; this.fetchResponses = new ConcurrentLinkedQueue<>(); - this.expectedOps = new CountDown(shards.size()); } void start() { - if (shards.isEmpty()) { - listener.onResponse(new IndicesShardStoresResponse()); - } else { - // explicitely type lister, some IDEs (Eclipse) are not able to correctly infer the function type - Lister, NodeGatewayStartedShards> lister = this::listStartedShards; + try { for (Tuple shard : shards) { - InternalAsyncFetch fetch = new InternalAsyncFetch(logger, "shard_stores", shard.v1(), shard.v2(), lister); - fetch.fetchData(nodes, Collections.emptySet()); + new InternalAsyncFetch(logger, "shard_stores", shard.v1(), shard.v2()).fetchData(nodes, Collections.emptySet()); } + } finally { + refs.close(); } } @@ -186,14 +182,10 @@ private void listStartedShards( private class InternalAsyncFetch extends AsyncShardFetch { - InternalAsyncFetch( - Logger logger, - String type, - ShardId shardId, - String customDataPath, - Lister, NodeGatewayStartedShards> action - ) { - super(logger, type, shardId, customDataPath, action); + private final Releasable ref = refs.acquire(); + + InternalAsyncFetch(Logger logger, String type, ShardId shardId, String customDataPath) { + super(logger, type, shardId, customDataPath); } @Override @@ -203,63 +195,17 @@ protected synchronized void processAsyncFetch( long fetchingRound ) { fetchResponses.add(new Response(shardId, responses, failures)); - if (expectedOps.countDown()) { - finish(); - } + ref.close(); } - void finish() { - Map>> indicesStatuses = new HashMap<>(); - List failures = new ArrayList<>(); - for (Response fetchResponse : fetchResponses) { - var indexName = fetchResponse.shardId.getIndexName(); - var shardId = fetchResponse.shardId.id(); - var indexStatuses = indicesStatuses.computeIfAbsent(indexName, k -> new HashMap<>()); - var storeStatuses = indexStatuses.computeIfAbsent(shardId, k -> new ArrayList<>()); - - for (NodeGatewayStartedShards r : fetchResponse.responses) { - if (shardExistsInNode(r)) { - var allocationStatus = getAllocationStatus(indexName, shardId, r.getNode()); - storeStatuses.add(new StoreStatus(r.getNode(), r.allocationId(), allocationStatus, r.storeException())); - } - } - - for (FailedNodeException failure : fetchResponse.failures) { - failures.add(new Failure(failure.nodeId(), indexName, shardId, failure.getCause())); - } - } - // make the status structure immutable - indicesStatuses.replaceAll((k, v) -> { - v.replaceAll((s, l) -> { - CollectionUtil.timSort(l); - return List.copyOf(l); - }); - return Map.copyOf(v); - }); - listener.onResponse(new IndicesShardStoresResponse(Map.copyOf(indicesStatuses), List.copyOf(failures))); - } - - private AllocationStatus getAllocationStatus(String index, int shardID, DiscoveryNode node) { - for (ShardRouting shardRouting : routingNodes.node(node.getId())) { - ShardId shardId = shardRouting.shardId(); - if (shardId.id() == shardID && shardId.getIndexName().equals(index)) { - if (shardRouting.primary()) { - return AllocationStatus.PRIMARY; - } else if (shardRouting.assignedToNode()) { - return AllocationStatus.REPLICA; - } else { - return AllocationStatus.UNUSED; - } - } - } - return AllocationStatus.UNUSED; - } - - /** - * A shard exists/existed in a node only if shard state file exists in the node - */ - private static boolean shardExistsInNode(final NodeGatewayStartedShards response) { - return response.storeException() != null || response.allocationId() != null; 
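// A minimal sketch of the ref-counting pattern introduced above (illustrative, hypothetical helper
// names; not part of the diff). RefCountingRunnable runs its completion action once the object and
// every Releasable acquired from it have been closed, which is why finish() fires exactly once after
// all per-shard fetches have reported back, and also fires when there are no shards at all:
//
//   RefCountingRunnable refs = new RefCountingRunnable(this::finish);
//   try {
//       for (var shard : shards) {
//           Releasable ref = refs.acquire();        // one ref per outstanding fetch
//           startFetch(shard, () -> ref.close());   // hypothetical async fetch; release on completion
//       }
//   } finally {
//       refs.close();                               // release the initial reference held by refs itself
//   }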
+ @Override + protected void list( + ShardId shardId, + String customDataPath, + DiscoveryNode[] nodes, + ActionListener> listener + ) { + listStartedShards(shardId, customDataPath, nodes, listener); } @Override @@ -279,5 +225,59 @@ public class Response { } } } + + void finish() { + Map>> indicesStatuses = new HashMap<>(); + List failures = new ArrayList<>(); + for (InternalAsyncFetch.Response fetchResponse : fetchResponses) { + var indexName = fetchResponse.shardId.getIndexName(); + var shardId = fetchResponse.shardId.id(); + var indexStatuses = indicesStatuses.computeIfAbsent(indexName, k -> new HashMap<>()); + var storeStatuses = indexStatuses.computeIfAbsent(shardId, k -> new ArrayList<>()); + + for (NodeGatewayStartedShards r : fetchResponse.responses) { + if (shardExistsInNode(r)) { + var allocationStatus = getAllocationStatus(indexName, shardId, r.getNode()); + storeStatuses.add(new StoreStatus(r.getNode(), r.allocationId(), allocationStatus, r.storeException())); + } + } + + for (FailedNodeException failure : fetchResponse.failures) { + failures.add(new Failure(failure.nodeId(), indexName, shardId, failure.getCause())); + } + } + // make the status structure immutable + indicesStatuses.replaceAll((k, v) -> { + v.replaceAll((s, l) -> { + CollectionUtil.timSort(l); + return List.copyOf(l); + }); + return Map.copyOf(v); + }); + listener.onResponse(new IndicesShardStoresResponse(Map.copyOf(indicesStatuses), List.copyOf(failures))); + } + + private AllocationStatus getAllocationStatus(String index, int shardID, DiscoveryNode node) { + for (ShardRouting shardRouting : routingNodes.node(node.getId())) { + ShardId shardId = shardRouting.shardId(); + if (shardId.id() == shardID && shardId.getIndexName().equals(index)) { + if (shardRouting.primary()) { + return AllocationStatus.PRIMARY; + } else if (shardRouting.assignedToNode()) { + return AllocationStatus.REPLICA; + } else { + return AllocationStatus.UNUSED; + } + } + } + return AllocationStatus.UNUSED; + } + + /** + * A shard exists/existed in a node only if shard state file exists in the node + */ + private static boolean shardExistsInNode(final NodeGatewayStartedShards response) { + return response.storeException() != null || response.allocationId() != null; + } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java index 64af5d7dc3b2..abdc26981845 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.indices.stats; import org.apache.lucene.store.AlreadyClosedException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -45,7 +45,7 @@ public class CommonStats implements Writeable, ToXContentFragment { - private static final Version VERSION_SUPPORTING_NODE_MAPPINGS = Version.V_8_5_0; + private static final TransportVersion VERSION_SUPPORTING_NODE_MAPPINGS = TransportVersion.V_8_5_0; @Nullable public DocsStats docs; @@ -201,11 +201,11 @@ public CommonStats(StreamInput in) throws IOException { translog = in.readOptionalWriteable(TranslogStats::new); requestCache = in.readOptionalWriteable(RequestCacheStats::new); recoveryStats = 
in.readOptionalWriteable(RecoveryStats::new); - if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { bulk = in.readOptionalWriteable(BulkStats::new); } shards = in.readOptionalWriteable(ShardCountStats::new); - if (in.getVersion().onOrAfter(VERSION_SUPPORTING_NODE_MAPPINGS)) { + if (in.getTransportVersion().onOrAfter(VERSION_SUPPORTING_NODE_MAPPINGS)) { nodeMappings = in.readOptionalWriteable(NodeMappingStats::new); } } @@ -228,11 +228,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(translog); out.writeOptionalWriteable(requestCache); out.writeOptionalWriteable(recoveryStats); - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { out.writeOptionalWriteable(bulk); } out.writeOptionalWriteable(shards); - if (out.getVersion().onOrAfter(VERSION_SUPPORTING_NODE_MAPPINGS)) { + if (out.getTransportVersion().onOrAfter(VERSION_SUPPORTING_NODE_MAPPINGS)) { out.writeOptionalWriteable(nodeMappings); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java index d56c344a05c4..24fea14e5321 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.stats; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -55,7 +55,7 @@ public CommonStatsFlags(StreamInput in) throws IOException { flags.add(flag); } } - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { in.readStringArray(); } groups = in.readStringArray(); @@ -73,7 +73,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeLong(longFlags); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeStringArrayNullable(Strings.EMPTY_ARRAY); } out.writeStringArrayNullable(groups); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsResponse.java index 7132449cee10..9b2efa83746e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsResponse.java @@ -9,16 +9,17 @@ package org.elasticsearch.action.admin.indices.stats; import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.action.support.broadcast.ChunkedBroadcastResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; +import java.util.Iterator; import java.util.List; import java.util.Map; -public class FieldUsageStatsResponse extends BroadcastResponse { +public class FieldUsageStatsResponse 
extends ChunkedBroadcastResponse { private final Map> stats; FieldUsageStatsResponse( @@ -48,19 +49,15 @@ public Map> getStats() { } @Override - protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException { - final List>> sortedEntries = stats.entrySet() - .stream() - .sorted(Map.Entry.comparingByKey()) - .toList(); - for (Map.Entry> entry : sortedEntries) { + protected Iterator customXContentChunks(ToXContent.Params params) { + return stats.entrySet().stream().sorted(Map.Entry.comparingByKey()).map(entry -> (ToXContent) (builder, p) -> { builder.startObject(entry.getKey()); builder.startArray("shards"); for (FieldUsageShardResponse resp : entry.getValue()) { resp.toXContent(builder, params); } builder.endArray(); - builder.endObject(); - } + return builder.endObject(); + }).iterator(); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java index 0cc3c45b60d7..ca2f6f50b5d3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java @@ -149,4 +149,9 @@ public IndicesStatsRequestBuilder setIncludeSegmentFileSizes(boolean includeSegm request.includeSegmentFileSizes(includeSegmentFileSizes); return this; } + + public IndicesStatsRequestBuilder setIncludeUnloadedSegments(boolean includeUnloadedSegments) { + request.includeUnloadedSegments(includeUnloadedSegments); + return this; + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java index 25c804a340a7..7c7add9262fd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java @@ -8,23 +8,27 @@ package org.elasticsearch.action.admin.indices.stats; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.admin.indices.stats.IndexStats.IndexStatsBuilder; import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BroadcastResponse; -import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.action.support.broadcast.ChunkedBroadcastResponse; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.health.ClusterIndexHealth; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.Index; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; @@ -33,7 +37,7 @@ import static java.util.Collections.unmodifiableMap; -public class IndicesStatsResponse extends 
BroadcastResponse { +public class IndicesStatsResponse extends ChunkedBroadcastResponse { private final Map indexHealthMap; @@ -46,7 +50,7 @@ public class IndicesStatsResponse extends BroadcastResponse { IndicesStatsResponse(StreamInput in) throws IOException { super(in); shards = in.readArray(ShardStats::new, ShardStats[]::new); - if (in.getVersion().onOrAfter(Version.V_8_1_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { indexHealthMap = in.readMap(StreamInput::readString, ClusterHealthStatus::readFrom); indexStateMap = in.readMap(StreamInput::readString, IndexMetadata.State::readFrom); } else { @@ -61,21 +65,23 @@ public class IndicesStatsResponse extends BroadcastResponse { int successfulShards, int failedShards, List shardFailures, - ClusterState clusterState + Metadata metadata, + RoutingTable routingTable ) { super(totalShards, successfulShards, failedShards, shardFailures); this.shards = shards; - Objects.requireNonNull(clusterState); + Objects.requireNonNull(metadata); + Objects.requireNonNull(routingTable); Objects.requireNonNull(shards); Map indexHealthModifiableMap = new HashMap<>(); Map indexStateModifiableMap = new HashMap<>(); for (ShardStats shard : shards) { Index index = shard.getShardRouting().index(); - IndexMetadata indexMetadata = clusterState.getMetadata().index(index); + IndexMetadata indexMetadata = metadata.index(index); if (indexMetadata != null) { indexHealthModifiableMap.computeIfAbsent( index.getName(), - ignored -> new ClusterIndexHealth(indexMetadata, clusterState.routingTable().index(index)).getStatus() + ignored -> new ClusterIndexHealth(indexMetadata, routingTable.index(index)).getStatus() ); indexStateModifiableMap.computeIfAbsent(index.getName(), ignored -> indexMetadata.getState()); } @@ -164,14 +170,14 @@ public CommonStats getPrimaries() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeArray(shards); - if (out.getVersion().onOrAfter(Version.V_8_1_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { out.writeMap(indexHealthMap, StreamOutput::writeString, (o, s) -> s.writeTo(o)); out.writeMap(indexStateMap, StreamOutput::writeString, (o, s) -> s.writeTo(o)); } } @Override - protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException { + protected Iterator customXContentChunks(ToXContent.Params params) { final String level = params.param("level", "indices"); final boolean isLevelValid = "cluster".equalsIgnoreCase(level) || "indices".equalsIgnoreCase(level) @@ -179,22 +185,11 @@ protected void addCustomXContentFields(XContentBuilder builder, Params params) t if (isLevelValid == false) { throw new IllegalArgumentException("level parameter must be one of [cluster] or [indices] or [shards] but was [" + level + "]"); } - - builder.startObject("_all"); - - builder.startObject("primaries"); - getPrimaries().toXContent(builder, params); - builder.endObject(); - - builder.startObject("total"); - getTotal().toXContent(builder, params); - builder.endObject(); - - builder.endObject(); - if ("indices".equalsIgnoreCase(level) || "shards".equalsIgnoreCase(level)) { - builder.startObject(Fields.INDICES); - for (IndexStats indexStats : getIndices().values()) { + return Iterators.concat(Iterators.single(((builder, p) -> { + commonStats(builder, p); + return builder.startObject(Fields.INDICES); + })), getIndices().values().stream().map(indexStats -> (builder, p) -> { builder.startObject(indexStats.getIndex()); builder.field("uuid", 
indexStats.getUuid()); if (indexStats.getHealth() != null) { @@ -204,11 +199,11 @@ protected void addCustomXContentFields(XContentBuilder builder, Params params) t builder.field("status", indexStats.getState().toString().toLowerCase(Locale.ROOT)); } builder.startObject("primaries"); - indexStats.getPrimaries().toXContent(builder, params); + indexStats.getPrimaries().toXContent(builder, p); builder.endObject(); builder.startObject("total"); - indexStats.getTotal().toXContent(builder, params); + indexStats.getTotal().toXContent(builder, p); builder.endObject(); if ("shards".equalsIgnoreCase(level)) { @@ -217,17 +212,34 @@ protected void addCustomXContentFields(XContentBuilder builder, Params params) t builder.startArray(Integer.toString(indexShardStats.getShardId().id())); for (ShardStats shardStats : indexShardStats) { builder.startObject(); - shardStats.toXContent(builder, params); + shardStats.toXContent(builder, p); builder.endObject(); } builder.endArray(); } builder.endObject(); } - builder.endObject(); - } - builder.endObject(); + return builder.endObject(); + }).iterator(), Iterators.single((b, p) -> b.endObject())); } + return Iterators.single((b, p) -> { + commonStats(b, p); + return b; + }); + } + + private void commonStats(XContentBuilder builder, ToXContent.Params p) throws IOException { + builder.startObject("_all"); + + builder.startObject("primaries"); + getPrimaries().toXContent(builder, p); + builder.endObject(); + + builder.startObject("total"); + getTotal().toXContent(builder, p); + builder.endObject(); + + builder.endObject(); } static final class Fields { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java index 457f7219a569..d5a7d17f2a4b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.stats; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -26,7 +26,7 @@ public class ShardStats implements Writeable, ToXContentFragment { - private static final Version DEDUPLICATE_SHARD_PATH_VERSION = Version.V_8_4_0; + private static final TransportVersion DEDUPLICATE_SHARD_PATH_VERSION = TransportVersion.V_8_4_0; private final ShardRouting shardRouting; private final CommonStats commonStats; @@ -46,7 +46,7 @@ public ShardStats(StreamInput in) throws IOException { commonStats = new CommonStats(in); commitStats = CommitStats.readOptionalCommitStatsFrom(in); statePath = in.readString(); - if (in.getVersion().onOrAfter(DEDUPLICATE_SHARD_PATH_VERSION)) { + if (in.getTransportVersion().onOrAfter(DEDUPLICATE_SHARD_PATH_VERSION)) { dataPath = Objects.requireNonNullElse(in.readOptionalString(), this.statePath); } else { dataPath = in.readString(); @@ -164,7 +164,7 @@ public void writeTo(StreamOutput out) throws IOException { commonStats.writeTo(out); out.writeOptionalWriteable(commitStats); out.writeString(statePath); - if (out.getVersion().onOrAfter(DEDUPLICATE_SHARD_PATH_VERSION)) { + if (out.getTransportVersion().onOrAfter(DEDUPLICATE_SHARD_PATH_VERSION)) { out.writeOptionalString(statePath.equals(dataPath) ? 
null : dataPath); } else { out.writeString(dataPath); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportFieldUsageAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportFieldUsageAction.java index b458b24c7b5e..9b3214787105 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportFieldUsageAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportFieldUsageAction.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -67,20 +66,17 @@ protected FieldUsageShardResponse readShardResult(StreamInput in) throws IOExcep } @Override - protected FieldUsageStatsResponse newResponse( + protected ResponseFactory getResponseFactory( FieldUsageStatsRequest request, - int totalShards, - int successfulShards, - int failedShards, - List fieldUsages, - List shardFailures, ClusterState clusterState ) { - final Map> combined = new HashMap<>(); - for (FieldUsageShardResponse response : fieldUsages) { - combined.computeIfAbsent(response.shardRouting.shardId().getIndexName(), i -> new ArrayList<>()).add(response); - } - return new FieldUsageStatsResponse(totalShards, successfulShards, shardFailures.size(), shardFailures, combined); + return (totalShards, successfulShards, failedShards, fieldUsages, shardFailures) -> { + final Map> combined = new HashMap<>(); + for (FieldUsageShardResponse response : fieldUsages) { + combined.computeIfAbsent(response.shardRouting.shardId().getIndexName(), i -> new ArrayList<>()).add(response); + } + return new FieldUsageStatsResponse(totalShards, successfulShards, shardFailures.size(), shardFailures, combined); + }; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java index 75e0d3c878ab..3d799e7efc42 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -11,7 +11,6 @@ import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -35,7 +34,6 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.List; public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction { @@ -85,22 +83,19 @@ protected ShardStats readShardResult(StreamInput in) throws IOException { } @Override - protected IndicesStatsResponse newResponse( - IndicesStatsRequest request, - int totalShards, - int successfulShards, - int failedShards, - List responses, - List shardFailures, - ClusterState clusterState - ) { - return new IndicesStatsResponse( - responses.toArray(new 
ShardStats[responses.size()]), + protected ResponseFactory getResponseFactory(IndicesStatsRequest request, ClusterState clusterState) { + // NB avoid capture of full cluster state + final var metadata = clusterState.getMetadata(); + final var routingTable = clusterState.routingTable(); + + return (totalShards, successfulShards, failedShards, responses, shardFailures) -> new IndicesStatsResponse( + responses.toArray(new ShardStats[0]), totalShards, successfulShards, failedShards, shardFailures, - clusterState + metadata, + routingTable ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index a332b57cd475..b2fb169b1f62 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -9,7 +9,7 @@ import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.admin.indices.alias.Alias; @@ -76,7 +76,7 @@ public PutIndexTemplateRequest(StreamInput in) throws IOException { order = in.readInt(); create = in.readBoolean(); settings = readSettingsFromStream(in); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { int size = in.readVInt(); for (int i = 0; i < size; i++) { in.readString(); // type - cannot assert on _doc because 7x allows arbitrary type names @@ -446,7 +446,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeInt(order); out.writeBoolean(create); settings.writeTo(out); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeVInt(mappings == null ? 
0 : 1); if (mappings != null) { out.writeString(MapperService.SINGLE_MAPPING_NAME); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java index 0721f499d600..e5a399aed85f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.validate.query; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.support.broadcast.BroadcastShardRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -33,7 +33,7 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest { public ShardValidateQueryRequest(StreamInput in) throws IOException { super(in); query = in.readNamedWriteable(QueryBuilder.class); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { int typesSize = in.readVInt(); if (typesSize > 0) { for (int i = 0; i < typesSize; i++) { @@ -80,7 +80,7 @@ public long nowInMillis() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeNamedWriteable(query); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeVInt(0); // no types to filter } filteringAliases.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java index 57c66605e315..6747b76f2aec 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.validate.query; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.IndicesOptions; @@ -48,7 +48,7 @@ public ValidateQueryRequest() { public ValidateQueryRequest(StreamInput in) throws IOException { super(in); query = in.readNamedWriteable(QueryBuilder.class); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { int typesSize = in.readVInt(); if (typesSize > 0) { for (int i = 0; i < typesSize; i++) { @@ -137,7 +137,7 @@ public boolean allShards() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeNamedWriteable(query); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeVInt(0); // no types to filter } out.writeBoolean(explain); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index 79c9dd2e6182..b5894d322b90 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ 
b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -10,7 +10,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.DocWriteRequest.OpType; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteResponse; @@ -237,7 +237,7 @@ private Failure(String index, String id, Exception cause, RestStatus status, lon */ public Failure(StreamInput in) throws IOException { index = in.readString(); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { in.readString(); // can't make an assertion about type names here because too many tests still set their own // types bypassing various checks @@ -253,7 +253,7 @@ public Failure(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeString(MapperService.SINGLE_MAPPING_NAME); } out.writeOptionalString(id); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor2.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor2.java new file mode 100644 index 000000000000..37b92d9287aa --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor2.java @@ -0,0 +1,415 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.bulk; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.threadpool.Scheduler; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.Objects; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.BiConsumer; + +/** + * A bulk processor is a thread safe bulk processing class, allowing to easily set when to "flush" a new bulk request + * (either based on number of actions, based on the size, or time), and to easily control the number of concurrent bulk + * requests allowed to be executed in parallel. + *
+ * In order to create a new bulk processor, use the {@link Builder}. + */ +public class BulkProcessor2 { + + /** + * A listener for the execution. + */ + public interface Listener { + + /** + * Callback before the bulk is executed. + */ + void beforeBulk(long executionId, BulkRequest request); + + /** + * Callback after a successful execution of bulk request. + */ + void afterBulk(long executionId, BulkRequest request, BulkResponse response); + + /** + * Callback after a failed execution of bulk request. + *
+ * Note that in case an instance of InterruptedException is passed, which means that request processing has been + * cancelled externally, the thread's interruption status has been restored prior to calling this method. + */ + void afterBulk(long executionId, BulkRequest request, Exception failure); + } + + /** + * A builder used to create a build an instance of a bulk processor. + */ + public static class Builder { + + private final BiConsumer> consumer; + private final Listener listener; + private final ThreadPool threadPool; + private int maxRequestsInBulk = 1000; + private ByteSizeValue maxBulkSizeInBytes = new ByteSizeValue(5, ByteSizeUnit.MB); + private ByteSizeValue maxBytesInFlight = new ByteSizeValue(50, ByteSizeUnit.MB); + private TimeValue flushInterval = null; + private int maxNumberOfRetries = 3; + + private Builder(BiConsumer> consumer, Listener listener, ThreadPool threadPool) { + this.consumer = consumer; + this.listener = listener; + this.threadPool = threadPool; + } + + /** + * Sets when to flush a new bulk request based on the number of actions currently added. Defaults to + * {@code 1000}. Can be set to {@code -1} to disable it. + */ + public Builder setBulkActions(int bulkActions) { + this.maxRequestsInBulk = bulkActions; + return this; + } + + /** + * Sets when to flush a new bulk request based on the size of actions currently added. Defaults to + * {@code 5mb}. Can be set to {@code -1} to disable it. + */ + public Builder setBulkSize(ByteSizeValue maxBulkSizeInBytes) { + this.maxBulkSizeInBytes = maxBulkSizeInBytes; + return this; + } + + /** + * Sets a flush interval flushing *any* bulk actions pending if the interval passes. Defaults to not set. + *
+ * Note, both {@link #setBulkActions(int)} and {@link #setBulkSize(org.elasticsearch.common.unit.ByteSizeValue)} + * can be set to {@code -1} with the flush interval set allowing for complete async processing of bulk actions. + */ + public Builder setFlushInterval(TimeValue flushInterval) { + this.flushInterval = flushInterval; + return this; + } + + /** + * Sets the maximum number of times a BulkRequest will be retried if it fails. + */ + public Builder setMaxNumberOfRetries(int maxNumberOfRetries) { + assert maxNumberOfRetries >= 0; + this.maxNumberOfRetries = maxNumberOfRetries; + return this; + } + + /** + * Sets the maximum number of bytes allowed in in-flight requests (both the BulkRequest being built up by the BulkProcessor and + * any BulkRequests sent to Retry2 that have not yet completed) before subsequent calls to add()result in + * EsRejectedExecutionException. Defaults to 50mb. + */ + public Builder setMaxBytesInFlight(ByteSizeValue maxBytesInFlight) { + this.maxBytesInFlight = maxBytesInFlight; + return this; + } + + /** + * Builds a new bulk processor. + */ + public BulkProcessor2 build() { + return new BulkProcessor2( + consumer, + maxNumberOfRetries, + listener, + maxRequestsInBulk, + maxBulkSizeInBytes, + maxBytesInFlight, + flushInterval, + threadPool + ); + } + } + + /** + * @param consumer The consumer that is called to fulfil bulk operations. This consumer _must_ operate either very fast or + * asynchronously. + * @param listener The BulkProcessor2 listener that gets called on bulk events + * @param threadPool The threadpool used to schedule the flush task for this bulk processor, if flushInterval is not null. + * @return the builder for BulkProcessor2 + */ + public static Builder builder( + BiConsumer> consumer, + Listener listener, + ThreadPool threadPool + ) { + Objects.requireNonNull(consumer, "consumer"); + Objects.requireNonNull(listener, "listener"); + return new Builder(consumer, listener, threadPool); + } + + private final int maxActionsPerBulkRequest; + private final long maxBulkSizeBytes; + private final ByteSizeValue maxBytesInFlight; + /* + * This is the approximate total number of bytes in in-flight requests, both in the BulkRequest that it is building up and in all of + * the BulkRequests that it has sent to Retry2 that have not completed yet. If this number would exceeds maxBytesInFlight, then calls + * to add() will throw EsRejectedExecutionExceptions. + */ + private final AtomicLong totalBytesInFlight = new AtomicLong(0); + + /** + * This is a task (which might be null) that is scheduled at some pont in the future to flush the bulk request and start a new bulk + * request. This variable is read and written to from multiple threads, and is protected by mutex. + */ + private volatile Scheduler.Cancellable cancellableFlushTask = null; + + private final AtomicLong executionIdGen = new AtomicLong(); + + private static final Logger logger = LogManager.getLogger(BulkProcessor2.class); + + private final BiConsumer> consumer; + private final Listener listener; + + private final Retry2 retry; + + private final TimeValue flushInterval; + + private final ThreadPool threadPool; + + /* + * This is the BulkRequest that is being built up by this class in calls to the various add methods. 
+ */ + private BulkRequest bulkRequestUnderConstruction; + + private volatile boolean closed = false; + /* + * This mutex is used to protect two things related to the bulkRequest object: (1) it makes sure that two threads do not add requests + * to the BulkRequest at the same time since BulkRequest is not threadsafe and (2) it makes sure that no other thread is writing to + * the BulkRequest when we swap the bulkRequest variable over to a new BulkRequest object. It also protects access to + * cancellableFlushTask. + */ + private final Object mutex = new Object(); + + BulkProcessor2( + BiConsumer> consumer, + int maxNumberOfRetries, + Listener listener, + int maxActionsPerBulkRequest, + ByteSizeValue maxBulkSize, + ByteSizeValue maxBytesInFlight, + @Nullable TimeValue flushInterval, + ThreadPool threadPool + ) { + this.maxActionsPerBulkRequest = maxActionsPerBulkRequest; + this.maxBulkSizeBytes = maxBulkSize.getBytes(); + this.maxBytesInFlight = maxBytesInFlight; + this.bulkRequestUnderConstruction = new BulkRequest(); + this.consumer = consumer; + this.listener = listener; + this.retry = new Retry2(maxNumberOfRetries); + this.flushInterval = flushInterval; + this.threadPool = threadPool; + } + + /** + * Closes the processor. Any remaining bulk actions are flushed if they can be flushed in the given time. + *
+ * Waits for up to the specified timeout for all bulk requests to complete then returns + * + * @param timeout The maximum time to wait for the bulk requests to complete + * @param unit The time unit of the {@code timeout} argument + * @throws InterruptedException If the current thread is interrupted + */ + public void awaitClose(long timeout, TimeUnit unit) throws InterruptedException { + synchronized (mutex) { + if (closed) { + return; + } + closed = true; + + if (cancellableFlushTask != null) { + cancellableFlushTask.cancel(); + } + + if (bulkRequestUnderConstruction.numberOfActions() > 0) { + execute(); + } + this.retry.awaitClose(timeout, unit); + } + } + + /** + * Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior of {@link IndexRequest} + * (for example, if no id is provided, one will be generated, or usage of the create flag). + * @throws EsRejectedExecutionException if adding the approximate size in bytes of the request to totalBytesInFlight would exceed + * maxBytesInFlight + */ + public BulkProcessor2 add(IndexRequest request) throws EsRejectedExecutionException { + return add((DocWriteRequest) request); + } + + /** + * Adds an {@link DeleteRequest} to the list of actions to execute. + * @throws EsRejectedExecutionException if adding the approximate size in bytes of the request to totalBytesInFlight would exceed + * maxBytesInFlight + */ + public BulkProcessor2 add(DeleteRequest request) throws EsRejectedExecutionException { + return add((DocWriteRequest) request); + } + + /** + * Adds either a delete or an index request. + * @throws EsRejectedExecutionException if the total bytes already in flight exceeds maxBytesInFlight. In this case, the request will + * not be retried and it is on the client to decide whether to wait and try later. + */ + private BulkProcessor2 add(DocWriteRequest request) throws EsRejectedExecutionException { + internalAdd(request); + return this; + } + + /* + * Exposed for unit testing + */ + long getTotalBytesInFlight() { + return totalBytesInFlight.get(); + } + + protected void ensureOpen() { + if (closed) { + throw new IllegalStateException("bulk process already closed"); + } + } + + private void internalAdd(DocWriteRequest request) throws EsRejectedExecutionException { + // bulkRequest and instance swapping is not threadsafe, so execute the mutations under a mutex. + // once the bulk request is ready to be shipped swap the instance reference unlock and send the local reference to the handler. + Tuple bulkRequestToExecute; + synchronized (mutex) { + ensureOpen(); + if (totalBytesInFlight.get() >= maxBytesInFlight.getBytes()) { + throw new EsRejectedExecutionException( + "Cannot index request of size " + + bulkRequestUnderConstruction.estimatedSizeInBytes() + + " because " + + totalBytesInFlight.get() + + " bytes are already in flight and the max is " + + maxBytesInFlight + ); + } + long bytesBeforeNewRequest = bulkRequestUnderConstruction.estimatedSizeInBytes(); + bulkRequestUnderConstruction.add(request); + totalBytesInFlight.addAndGet(bulkRequestUnderConstruction.estimatedSizeInBytes() - bytesBeforeNewRequest); + bulkRequestToExecute = newBulkRequestIfNeeded(); + } + // execute sending the local reference outside the lock to allow handler to control the concurrency via it's configuration. + if (bulkRequestToExecute != null) { + execute(bulkRequestToExecute.v1(), bulkRequestToExecute.v2()); + } + /* + * We could have the flush task running nonstop, checking every flushInterval whether there was data to flush. 
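Putting the public API above together, a minimal usage sketch (illustrative only, not part of this patch; it assumes a node `Client client` and a `ThreadPool threadPool` supplied by the calling component, with imports omitted):

    // Client#bulk(BulkRequest, ActionListener<BulkResponse>) satisfies the BiConsumer expected by builder().
    BulkProcessor2 processor = BulkProcessor2.builder(client::bulk, new BulkProcessor2.Listener() {
        @Override
        public void beforeBulk(long executionId, BulkRequest request) {
            // e.g. log request.numberOfActions() before the bulk is sent
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
            // per-item outcomes are available via response.hasFailures() / response.getItems()
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, Exception failure) {
            // terminal failure, i.e. the retry budget for this bulk request is exhausted
        }
    }, threadPool)
        .setBulkActions(500)                                // flush after 500 actions ...
        .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.MB)) // ... or roughly 1mb of request data ...
        .setFlushInterval(TimeValue.timeValueSeconds(5))    // ... or every 5 seconds, whichever comes first
        .setMaxNumberOfRetries(3)
        .build();

    // add() throws EsRejectedExecutionException if accepting this request would push the bytes
    // in flight past maxBytesInFlight; the caller decides whether to back off and try again.
    processor.add(new IndexRequest("my-index").id("1").source(Map.of("field", "value")));

    // On shutdown: flush anything pending and wait up to the timeout (InterruptedException propagates).
    processor.awaitClose(30, TimeUnit.SECONDS);

Note that back-pressure here is expressed as bytes in flight (setMaxBytesInFlight) rather than as a count of concurrent requests.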
But there is + * likely to not be data almost all of the time, so this would waste a thread's time. So instead we schedule a flush task + * whenever we add data. If a task is already scheduled, it does nothing. Since both the cancellableFlushTask and the + * bulkRequestUnderConstruction are protected by the same mutex, there is no risk that a request will be left hanging. + */ + scheduleFlushTask(); + } + + /** + * This method schedules a flush task to run flushInterval in the future if flushInterval is not null and if there is not already a + * flush task scheduled. + */ + private void scheduleFlushTask() { + if (flushInterval == null) { + return; + } + /* + * This method is called from multiple threads. We synchronize on mutex here so that we are sure that cancellableFlushTask is not + * changed between when we check it and when we set it (whether that is a transition from null -> not null from another thread + * in this method or a change from not null -> null from the scheduled task). + */ + synchronized (mutex) { + if (cancellableFlushTask == null) { + cancellableFlushTask = threadPool.schedule(() -> { + synchronized (mutex) { + if (closed == false && bulkRequestUnderConstruction.numberOfActions() > 0) { + execute(); + } + cancellableFlushTask = null; + } + }, flushInterval, ThreadPool.Names.GENERIC); + } + } + } + + private Tuple newBulkRequestIfNeeded() { + assert Thread.holdsLock(mutex); + ensureOpen(); + if (bulkRequestExceedsLimits() || totalBytesInFlight.get() >= maxBytesInFlight.getBytes()) { + final BulkRequest bulkRequest = this.bulkRequestUnderConstruction; + this.bulkRequestUnderConstruction = new BulkRequest(); + return new Tuple<>(bulkRequest, executionIdGen.incrementAndGet()); + } + return null; + } + + /** + * This method sends the bulkRequest to the consumer up to maxNumberOfRetries times. The executionId is used to notify the listener + * both before and after the request. 
+ * @param bulkRequest + * @param executionId + */ + private void execute(BulkRequest bulkRequest, long executionId) { + try { + listener.beforeBulk(executionId, bulkRequest); + retry.consumeRequestWithRetries(consumer, bulkRequest, new ActionListener<>() { + @Override + public void onResponse(BulkResponse response) { + totalBytesInFlight.addAndGet(-1 * bulkRequest.estimatedSizeInBytes()); + listener.afterBulk(executionId, bulkRequest, response); + } + + @Override + public void onFailure(Exception e) { + totalBytesInFlight.addAndGet(-1 * bulkRequest.estimatedSizeInBytes()); + listener.afterBulk(executionId, bulkRequest, e); + } + }); + } catch (Exception e) { + logger.warn(() -> "Failed to execute bulk request " + executionId + ".", e); + totalBytesInFlight.addAndGet(-1 * bulkRequest.estimatedSizeInBytes()); + listener.afterBulk(executionId, bulkRequest, e); + } + } + + private void execute() { + assert Thread.holdsLock(mutex); + final BulkRequest bulkRequest = this.bulkRequestUnderConstruction; + final long executionId = executionIdGen.incrementAndGet(); + this.bulkRequestUnderConstruction = new BulkRequest(); + execute(bulkRequest, executionId); + } + + private boolean bulkRequestExceedsLimits() { + assert Thread.holdsLock(mutex); + if (maxActionsPerBulkRequest != -1 && bulkRequestUnderConstruction.numberOfActions() >= maxActionsPerBulkRequest) { + return true; + } + return maxBulkSizeBytes != -1 && bulkRequestUnderConstruction.estimatedSizeInBytes() >= maxBulkSizeBytes; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java index 4f5d54ac58e5..d00585c98933 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java @@ -357,6 +357,7 @@ public void parse( .setIfSeqNo(ifSeqNo) .setIfPrimaryTerm(ifPrimaryTerm) .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType) + .setDynamicTemplates(dynamicTemplates) .setRequireAlias(requireAlias), type ); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/Retry2.java b/server/src/main/java/org/elasticsearch/action/bulk/Retry2.java new file mode 100644 index 000000000000..6c60d07f6e88 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/bulk/Retry2.java @@ -0,0 +1,274 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.action.bulk; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.rest.RestStatus; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Phaser; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.BiConsumer; +import java.util.function.Predicate; +import java.util.stream.StreamSupport; + +/** + * Encapsulates asynchronous retry logic. This class will attempt to load a BulkRequest up to numberOfRetries times. If that number of + * times is exhausted, it sends the listener an EsRejectedExecutionException. + */ +class Retry2 { + private static final Logger logger = LogManager.getLogger(Retry2.class); + private final int maxNumberOfRetries; + /** + * Once awaitClose() has been called this is set to true. Any new requests that come in (whether via consumeRequestWithRetries() or a + * retry) will be rejected by sending EsRejectedExecutionExceptions to their listeners. + */ + private boolean isClosing = false; + /* + * We register in-flight calls with this Phaser so that we know whether there are any still in flight when we call awaitClose(). The + * phaser is initialized with 1 party intentionally. This is because if the number of parties goes over 0 and then back down to 0 the + * phaser is automatically terminated. Since we're tracking the number of in flight calls to Elasticsearch we expect this to happen + * often. Putting an initial party in here makes sure that the phaser is never terminated before we're ready for it. + */ + private final Phaser inFlightRequestsPhaser = new Phaser(1); + + /** + * Creates a Retry2. + * @param maxNumberOfRetries This is the maximum number of times a BulkRequest will be retried + */ + Retry2(int maxNumberOfRetries) { + this.maxNumberOfRetries = maxNumberOfRetries; + } + + /** + * This method attempts to load the given BulkRequest (via the given BiConsumer). If the initial load fails with a retry-able reason, + * this class will retry the load up to maxNumberOfRetries times. The given ActionListener will be notified of the result, either on + * success or after failure when no retries are left. The listener is not notified of failures if it is still possible to retry. + * @param consumer The consumer to which apply the request and listener. This consumer is expected to perform its work asynchronously + * (that is, not block the thread from which it is called). + * @param bulkRequest The bulk request that should be executed. + * @param listener A listener that is invoked when the bulk request finishes or completes with an exception. 
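As a rough sketch of that contract (a hypothetical same-package caller; within this change the only real driver of Retry2 is BulkProcessor2):

    Retry2 retry = new Retry2(3);                       // at most 3 retries per bulk request
    retry.consumeRequestWithRetries(client::bulk, bulkRequest, new ActionListener<BulkResponse>() {
        @Override
        public void onResponse(BulkResponse response) {
            // responses accumulated across the first attempt and any retries of failed items
        }

        @Override
        public void onFailure(Exception e) {
            // e.g. EsRejectedExecutionException once no retries remain or the class is closing
        }
    });

    // later, on shutdown: reject new work and wait for in-flight requests (InterruptedException propagates)
    retry.awaitClose(30, TimeUnit.SECONDS);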
+ */ + public void consumeRequestWithRetries( + BiConsumer> consumer, + BulkRequest bulkRequest, + ActionListener listener + ) { + if (isClosing) { + listener.onFailure(new EsRejectedExecutionException("The bulk processor is closing")); + return; + } + List responsesAccumulator = new ArrayList<>(); + logger.trace("Sending a bulk request with {} bytes in {} items", bulkRequest.estimatedSizeInBytes(), bulkRequest.requests.size()); + inFlightRequestsPhaser.register(); + consumer.accept(bulkRequest, new RetryHandler(bulkRequest, responsesAccumulator, consumer, listener, maxNumberOfRetries)); + } + + /** + * Retries the bulkRequestForRetry if retriesRemaining is greater than 0, otherwise notifies the listener of failure + * @param bulkRequestForRetry The bulk request for retry. This should only include the items that have not previously succeeded + * @param responsesAccumulator An accumulator for all BulkItemResponses for the original bulkRequest across all retries + * @param consumer + * @param listener The listener to be notified of success or failure on this retry or subsequent retries + * @param retriesRemaining The number of times remaining that this BulkRequest can be retried + */ + private void retry( + BulkRequest bulkRequestForRetry, + List responsesAccumulator, + BiConsumer> consumer, + ActionListener listener, + int retriesRemaining + ) { + if (isClosing) { + listener.onFailure(new EsRejectedExecutionException("The bulk processor is closing")); + return; + } + if (retriesRemaining > 0) { + inFlightRequestsPhaser.register(); + consumer.accept( + bulkRequestForRetry, + new RetryHandler(bulkRequestForRetry, responsesAccumulator, consumer, listener, retriesRemaining - 1) + ); + } else { + listener.onFailure( + new EsRejectedExecutionException( + "Could not retry the bulk request because the backoff policy does not allow any more retries" + ) + ); + } + } + + /** + * This method makes an attempt to wait for any outstanding requests to complete. Any new requests that come in after this method has + * been called (whether via consumeRequestWithRetries() or a retry) will be rejected by sending EsRejectedExecutionExceptions to their + * listeners. + * @param timeout + * @param unit + */ + void awaitClose(long timeout, TimeUnit unit) throws InterruptedException { + isClosing = true; + /* + * This removes the party that was placed in the phaser at initialization so that the phaser will terminate once all in-flight + * requests have been completed (i.e. this makes it possible that the number of parties can become 0). + */ + inFlightRequestsPhaser.arriveAndDeregister(); + try { + inFlightRequestsPhaser.awaitAdvanceInterruptibly(0, timeout, unit); + } catch (TimeoutException e) { + logger.debug("Timed out waiting for all requests to complete during awaitClose"); + } + } + + /** + * This listener will retry any failed requests within a bulk request if possible. It only delegates to the underlying listener once + * either all requests have succeeded or all retry attempts have been exhausted. 
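The awaitClose() implementation above leans on the Phaser bookkeeping explained at the top of this class; stripped of the surrounding code, the idiom is roughly (illustrative fragment, not part of this patch):

    Phaser inFlight = new Phaser(1);      // the initial party keeps the phaser alive while no requests are in flight

    inFlight.register();                  // just before a request is handed to the consumer
    // ... request completes asynchronously ...
    inFlight.arriveAndDeregister();       // in that request's listener, on success or failure

    // on close: drop the initial party so the party count can reach zero, then wait with a bound
    inFlight.arriveAndDeregister();
    try {
        inFlight.awaitAdvanceInterruptibly(0, 30, TimeUnit.SECONDS);
    } catch (TimeoutException e) {
        // some requests were still in flight when the timeout elapsed
    }                                     // InterruptedException propagates to the caller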
+ */ + private final class RetryHandler implements ActionListener { + private static final RestStatus RETRY_STATUS = RestStatus.TOO_MANY_REQUESTS; + private final BulkRequest bulkRequest; + private final BiConsumer> consumer; + private final ActionListener listener; + private final List responsesAccumulator; + private final long startTimestampNanos; + private final int retriesRemaining; + + /** + * Creates a RetryHandler listener + * @param bulkRequest The BulkRequest to be sent, a subset of the original BulkRequest. + * @param responsesAccumulator The accumulator of all BulkItemResponses for the original BulkRequest. These are completed + * responses, meaning responses for successes, or responses for failures only if no more retries are + * allowed. + * @param consumer + * @param listener The delegate listener + * @param retriesRemaining The number of retry attempts remaining for the bulkRequestForRetry + */ + RetryHandler( + BulkRequest bulkRequest, + List responsesAccumulator, + BiConsumer> consumer, + ActionListener listener, + int retriesRemaining + ) { + this.bulkRequest = bulkRequest; + this.responsesAccumulator = responsesAccumulator; + this.consumer = consumer; + this.listener = listener; + this.startTimestampNanos = System.nanoTime(); + this.retriesRemaining = retriesRemaining; + } + + @Override + public void onResponse(BulkResponse bulkItemResponses) { + if (bulkItemResponses.hasFailures() == false) { + logger.trace( + "Got a response in {} with {} items, no failures", + bulkItemResponses.getTook(), + bulkItemResponses.getItems().length + ); + // we're done here, include all responses + addResponses(bulkItemResponses, (r -> true)); + listener.onResponse(getAccumulatedResponse()); + } else { + if (canRetry(bulkItemResponses)) { + logger.trace( + "Got a response in {} with {} items including failures, can retry", + bulkItemResponses.getTook(), + bulkItemResponses.getItems().length + ); + addResponses(bulkItemResponses, (r -> r.isFailed() == false)); + BulkRequest retryRequest = createBulkRequestForRetry(bulkItemResponses); + retry(retryRequest, responsesAccumulator, consumer, listener, retriesRemaining); + } else { + logger.trace( + "Got a response in {} with {} items including failures, cannot retry", + bulkItemResponses.getTook(), + bulkItemResponses.getItems().length + ); + addResponses(bulkItemResponses, (r -> true)); + listener.onResponse(getAccumulatedResponse()); + } + } + inFlightRequestsPhaser.arriveAndDeregister(); + } + + @Override + public void onFailure(Exception e) { + boolean canRetry = ExceptionsHelper.status(e) == RETRY_STATUS && retriesRemaining > 0; + if (canRetry) { + inFlightRequestsPhaser.arriveAndDeregister(); + retry(bulkRequest, responsesAccumulator, consumer, listener, retriesRemaining); + } else { + listener.onFailure(e); + inFlightRequestsPhaser.arriveAndDeregister(); + } + } + + /** + * This creates a new BulkRequest from only those items in the bulkItemsResponses that failed. 
+ * @param bulkItemResponses The latest response (including any successes and failures) + * @return + */ + private BulkRequest createBulkRequestForRetry(BulkResponse bulkItemResponses) { + BulkRequest requestToReissue = new BulkRequest(); + int index = 0; + for (BulkItemResponse bulkItemResponse : bulkItemResponses.getItems()) { + if (bulkItemResponse.isFailed()) { + DocWriteRequest originalBulkItemRequest = bulkRequest.requests().get(index); + if (originalBulkItemRequest instanceof IndexRequest item) { + item.reset(); + } + requestToReissue.add(originalBulkItemRequest); + } + index++; + } + return requestToReissue; + } + + /** + * Returns true if the given bulkItemResponses can be retried. + * @param bulkItemResponses + * @return + */ + private boolean canRetry(BulkResponse bulkItemResponses) { + if (retriesRemaining == 0) { + return false; + } + for (BulkItemResponse bulkItemResponse : bulkItemResponses) { + if (bulkItemResponse.isFailed()) { + final RestStatus status = bulkItemResponse.status(); + if (status != RETRY_STATUS) { + return false; + } + } + } + return true; + } + + private void addResponses(BulkResponse response, Predicate filter) { + List bulkItemResponses = StreamSupport.stream(response.spliterator(), false).filter(filter).toList(); + responsesAccumulator.addAll(bulkItemResponses); + } + + private BulkResponse getAccumulatedResponse() { + BulkItemResponse[] itemResponses = responsesAccumulator.toArray(new BulkItemResponse[0]); + long stopTimestamp = System.nanoTime(); + long totalLatencyMs = TimeValue.timeValueNanos(stopTimestamp - startTimestampNanos).millis(); + logger.trace("Accumulated response includes {} items", itemResponses.length); + return new BulkResponse(itemResponses, totalLatencyMs); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 827cdc57d21a..c942cb6aa391 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -30,6 +30,8 @@ import org.elasticsearch.action.ingest.IngestActionForwarder; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.update.UpdateRequest; @@ -44,8 +46,10 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.core.Releasable; @@ -80,6 +84,7 @@ import java.util.function.LongSupplier; import java.util.stream.Collectors; +import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.EXCLUDED_DATA_STREAMS_KEY; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; @@ -188,7 
+193,32 @@ public static ActionListe } @Override - protected void doExecute(Task task, BulkRequest bulkRequest, ActionListener listener) { + protected void doExecute(Task task, BulkRequest bulkRequest, ActionListener outerListener) { + // As a work-around to support `?refresh`, explicitly replace the refresh policy with a call to the Refresh API, + // and always set forced_refresh to true. + // TODO: Replace with a less hacky approach. + ActionListener listener = outerListener; + if (DiscoveryNode.isStateless(clusterService.getSettings()) && bulkRequest.getRefreshPolicy() != WriteRequest.RefreshPolicy.NONE) { + listener = outerListener.delegateFailure((l, r) -> { + final Set indices = new HashSet<>(); + for (BulkItemResponse response : r.getItems()) { + if (response.isFailed() == false) { + indices.add(response.getIndex()); + } + DocWriteResponse docWriteResponse = response.getResponse(); + if (docWriteResponse != null) { + docWriteResponse.setForcedRefresh(true); + } + } + client.admin() + .indices() + .prepareRefresh() + .setIndices(indices.toArray(Strings.EMPTY_ARRAY)) + .setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_HIDDEN) + .execute(l.map(ignored -> r)); + }); + bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.NONE); + } /* * This is called on the Transport tread so we can check the indexing * memory pressure *quickly* but we don't want to keep the transport @@ -234,7 +264,7 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, String exec } if (actionRequest instanceof IndexRequest ir) { - ir.checkAutoIdWithOpTypeCreateSupportedByVersion(minNodeVersion); + ir.checkAutoIdWithOpTypeCreateSupportedByVersion(minNodeVersion.transportVersion); if (ir.getAutoGeneratedTimestamp() != IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP) { throw new IllegalArgumentException("autoGeneratedTimestamp should not be set externally"); } @@ -245,7 +275,7 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, String exec // this method (doExecute) will be called again, but with the bulk requests updated from the ingest node processing but // also with IngestService.NOOP_PIPELINE_NAME on each request. This ensures that this on the second time through this method, // this path is never taken. 
- try { + ActionListener.run(listener, l -> { if (Assertions.ENABLED) { final boolean arePipelinesResolved = bulkRequest.requests() .stream() @@ -255,13 +285,11 @@ protected void doInternalExecute(Task task, BulkRequest bulkRequest, String exec assert arePipelinesResolved : bulkRequest; } if (clusterService.localNode().isIngestNode()) { - processBulkIndexIngestRequest(task, bulkRequest, executorName, listener); + processBulkIndexIngestRequest(task, bulkRequest, executorName, l); } else { - ingestForwarder.forwardIngestRequest(BulkAction.INSTANCE, bulkRequest, listener); + ingestForwarder.forwardIngestRequest(BulkAction.INSTANCE, bulkRequest, l); } - } catch (Exception e) { - listener.onFailure(e); - } + }); return; } @@ -754,10 +782,18 @@ private static class ConcreteIndices { } IndexAbstraction resolveIfAbsent(DocWriteRequest request) { - return indexAbstractions.computeIfAbsent( - request.index(), - key -> indexNameExpressionResolver.resolveWriteIndexAbstraction(state, request) - ); + try { + return indexAbstractions.computeIfAbsent( + request.index(), + key -> indexNameExpressionResolver.resolveWriteIndexAbstraction(state, request) + ); + } catch (IndexNotFoundException e) { + if (e.getMetadataKeys().contains(EXCLUDED_DATA_STREAMS_KEY)) { + throw new IllegalArgumentException("only write ops with an op_type of create are allowed in data streams", e); + } else { + throw e; + } + } } IndexRouting routing(Index index) { @@ -780,6 +816,7 @@ private void processBulkIndexIngestRequest( ingestService.executeBulkRequest( original.numberOfActions(), () -> bulkRequestModifier, + bulkRequestModifier::markItemAsDropped, bulkRequestModifier::markItemAsFailed, (originalThread, exception) -> { if (exception != null) { @@ -823,7 +860,6 @@ public boolean isForceExecution() { } } }, - bulkRequestModifier::markItemAsDropped, executorName ); } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 44037a9dce6a..e7a7573899df 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -273,7 +273,6 @@ private void finishRequest() { context.getBulkShardRequest(), context.buildShardResponse(), context.getLocationToSync(), - null, context.getPrimary(), logger, postWriteAction diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java index 49bcf38ac0e5..f4eae07cc291 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.action.datastreams; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; @@ -66,7 +66,7 @@ public ActionRequestValidationException validate() { public Request(StreamInput in) throws IOException { super(in); this.name = in.readString(); - if (in.getVersion().onOrAfter(Version.V_7_16_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { this.startTime = in.readVLong(); } else { this.startTime = System.currentTimeMillis(); @@ -77,7 +77,7 @@ 
public Request(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(name); - if (out.getVersion().onOrAfter(Version.V_7_16_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { out.writeVLong(startTime); } } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index 00bd66fc91b3..6ab2d390be37 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.action.datastreams; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; @@ -161,7 +161,7 @@ public DataStreamInfo( ClusterHealthStatus.readFrom(in), in.readOptionalString(), in.readOptionalString(), - in.getVersion().onOrAfter(Version.V_8_3_0) ? in.readOptionalWriteable(TimeSeries::new) : null + in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0) ? in.readOptionalWriteable(TimeSeries::new) : null ); } @@ -194,7 +194,7 @@ public void writeTo(StreamOutput out) throws IOException { dataStreamStatus.writeTo(out); out.writeOptionalString(indexTemplate); out.writeOptionalString(ilmPolicyName); - if (out.getVersion().onOrAfter(Version.V_8_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { out.writeOptionalWriteable(timeSeries); } } diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java index 42914d85bdeb..d944ada2f424 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.delete; import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; import org.elasticsearch.action.DocWriteRequest; @@ -64,7 +64,7 @@ public DeleteRequest(StreamInput in) throws IOException { public DeleteRequest(@Nullable ShardId shardId, StreamInput in) throws IOException { super(shardId, in); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { String type = in.readString(); assert MapperService.SINGLE_MAPPING_NAME.equals(type) : "Expected [_doc] but received [" + type + "]"; } @@ -256,7 +256,7 @@ public void writeThin(StreamOutput out) throws IOException { } private void writeBody(StreamOutput out) throws IOException { - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java index aceeb3bde53a..685eb0b8a199 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java +++ 
b/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.explain; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.single.shard.SingleShardRequest; @@ -54,7 +54,7 @@ public ExplainRequest(String index, String id) { ExplainRequest(StreamInput in) throws IOException { super(in); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { String type = in.readString(); assert MapperService.SINGLE_MAPPING_NAME.equals(type); } @@ -160,7 +160,7 @@ public ActionRequestValidationException validate() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java index 0b0f340cf61e..97c56069fa76 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java +++ b/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.explain; import org.apache.lucene.search.Explanation; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -69,7 +69,7 @@ public ExplainResponse(String index, String id, boolean exists, Explanation expl public ExplainResponse(StreamInput in) throws IOException { super(in); index = in.readString(); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { in.readString(); } id = in.readString(); @@ -118,7 +118,7 @@ public RestStatus status() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java index eec23999dedc..daa9ee30bf9a 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.fieldcaps; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -211,7 +211,7 @@ public FieldCapabilities( isSearchable, isAggregatable, isDimension == null ? false : isDimension, - metricType != null ? Enum.valueOf(TimeSeriesParams.MetricType.class, metricType) : null, + metricType != null ? TimeSeriesParams.MetricType.fromString(metricType) : null, indices != null ? indices.toArray(new String[0]) : null, nonSearchableIndices != null ? 
nonSearchableIndices.toArray(new String[0]) : null, nonAggregatableIndices != null ? nonAggregatableIndices.toArray(new String[0]) : null, @@ -227,7 +227,7 @@ public FieldCapabilities( this.isMetadataField = in.readBoolean(); this.isSearchable = in.readBoolean(); this.isAggregatable = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { this.isDimension = in.readBoolean(); this.metricType = in.readOptionalEnum(TimeSeriesParams.MetricType.class); } else { @@ -237,7 +237,7 @@ public FieldCapabilities( this.indices = in.readOptionalStringArray(); this.nonSearchableIndices = in.readOptionalStringArray(); this.nonAggregatableIndices = in.readOptionalStringArray(); - if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { this.nonDimensionIndices = in.readOptionalStringArray(); this.metricConflictsIndices = in.readOptionalStringArray(); } else { @@ -254,14 +254,14 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(isMetadataField); out.writeBoolean(isSearchable); out.writeBoolean(isAggregatable); - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { out.writeBoolean(isDimension); out.writeOptionalEnum(metricType); } out.writeOptionalStringArray(indices); out.writeOptionalStringArray(nonSearchableIndices); out.writeOptionalStringArray(nonAggregatableIndices); - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { out.writeOptionalStringArray(nonDimensionIndices); out.writeOptionalStringArray(metricConflictsIndices); } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java index 9a44a188b85b..ec03a4465eea 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFetcher.java @@ -23,6 +23,7 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.tasks.CancellableTask; import java.io.IOException; import java.util.Collections; @@ -45,6 +46,7 @@ class FieldCapabilitiesFetcher { } FieldCapabilitiesIndexResponse fetch( + CancellableTask task, ShardId shardId, String[] fieldPatterns, String[] filters, @@ -78,7 +80,7 @@ FieldCapabilitiesIndexResponse fetch( return new FieldCapabilitiesIndexResponse(shardId.getIndexName(), indexMappingHash, existing, true); } } - + task.ensureNotCancelled(); Predicate fieldPredicate = indicesService.getFieldFilter().apply(shardId.getIndexName()); final Map responseMap = retrieveFieldCaps( searchExecutionContext, diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java index 3d03cfc92e1e..ab13525014cc 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.fieldcaps; -import org.elasticsearch.Version; +import 
org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -23,14 +23,14 @@ import java.util.stream.Stream; final class FieldCapabilitiesIndexResponse implements Writeable { - private static final Version MAPPING_HASH_VERSION = Version.V_8_2_0; + private static final TransportVersion MAPPING_HASH_VERSION = TransportVersion.V_8_2_0; private final String indexName; @Nullable private final String indexMappingHash; private final Map responseMap; private final boolean canMatch; - private final transient Version originVersion; + private final transient TransportVersion originVersion; FieldCapabilitiesIndexResponse( String indexName, @@ -42,15 +42,15 @@ final class FieldCapabilitiesIndexResponse implements Writeable { this.indexMappingHash = indexMappingHash; this.responseMap = responseMap; this.canMatch = canMatch; - this.originVersion = Version.CURRENT; + this.originVersion = TransportVersion.CURRENT; } FieldCapabilitiesIndexResponse(StreamInput in) throws IOException { this.indexName = in.readString(); this.responseMap = in.readMap(StreamInput::readString, IndexFieldCapabilities::new); this.canMatch = in.readBoolean(); - this.originVersion = in.getVersion(); - if (in.getVersion().onOrAfter(MAPPING_HASH_VERSION)) { + this.originVersion = in.getTransportVersion(); + if (in.getTransportVersion().onOrAfter(MAPPING_HASH_VERSION)) { this.indexMappingHash = in.readOptionalString(); } else { this.indexMappingHash = null; @@ -62,7 +62,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(indexName); out.writeMap(responseMap, StreamOutput::writeString, (valueOut, fc) -> fc.writeTo(valueOut)); out.writeBoolean(canMatch); - if (out.getVersion().onOrAfter(MAPPING_HASH_VERSION)) { + if (out.getTransportVersion().onOrAfter(MAPPING_HASH_VERSION)) { out.writeOptionalString(indexMappingHash); } } @@ -87,7 +87,7 @@ List getResponses() { } static List readList(StreamInput input) throws IOException { - if (input.getVersion().before(MAPPING_HASH_VERSION)) { + if (input.getTransportVersion().before(MAPPING_HASH_VERSION)) { return input.readList(FieldCapabilitiesIndexResponse::new); } final List ungroupedList = input.readList(FieldCapabilitiesIndexResponse::new); @@ -96,7 +96,7 @@ static List readList(StreamInput input) throws I } static void writeList(StreamOutput output, List responses) throws IOException { - if (output.getVersion().before(MAPPING_HASH_VERSION)) { + if (output.getTransportVersion().before(MAPPING_HASH_VERSION)) { output.writeCollection(responses); return; } @@ -152,7 +152,7 @@ public IndexFieldCapabilities getField(String field) { return responseMap.get(field); } - Version getOriginVersion() { + TransportVersion getOriginVersion() { return originVersion; } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeRequest.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeRequest.java index 12bd84952f04..ef1c068bdc3e 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.fieldcaps; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import 
org.elasticsearch.action.IndicesRequest; @@ -19,6 +19,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; import java.io.IOException; import java.util.Arrays; @@ -41,7 +44,7 @@ class FieldCapabilitiesNodeRequest extends ActionRequest implements IndicesReque super(in); shardIds = in.readList(ShardId::new); fields = in.readStringArray(); - if (in.getVersion().onOrAfter(Version.V_8_2_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { filters = in.readStringArray(); allowedTypes = in.readStringArray(); } else { @@ -121,7 +124,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeList(shardIds); out.writeStringArray(fields); - if (out.getVersion().onOrAfter(Version.V_8_2_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { out.writeStringArray(filters); out.writeStringArray(allowedTypes); } @@ -136,6 +139,30 @@ public ActionRequestValidationException validate() { return null; } + @Override + public String getDescription() { + final StringBuilder stringBuilder = new StringBuilder("shards["); + Strings.collectionToDelimitedStringWithLimit(shardIds, ",", "", "", 1024, stringBuilder); + stringBuilder.append("], fields["); + Strings.collectionToDelimitedStringWithLimit(Arrays.asList(fields), ",", "", "", 1024, stringBuilder); + stringBuilder.append("], filters["); + stringBuilder.append(Strings.collectionToDelimitedString(Arrays.asList(filters), ",")); + stringBuilder.append("], types["); + stringBuilder.append(Strings.collectionToDelimitedString(Arrays.asList(allowedTypes), ",")); + stringBuilder.append("]"); + return stringBuilder.toString(); + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, "", parentTaskId, headers) { + @Override + public String getDescription() { + return FieldCapabilitiesNodeRequest.this.getDescription(); + } + }; + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java index b1b51de5c069..a19767002623 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.fieldcaps; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -18,6 +18,9 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -55,7 +58,7 @@ public FieldCapabilitiesRequest(StreamInput in) throws IOException { indexFilter = in.readOptionalNamedWriteable(QueryBuilder.class); nowInMillis 
= in.readOptionalLong(); runtimeFields = in.readMap(); - if (in.getVersion().onOrAfter(Version.V_8_2_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { filters = in.readStringArray(); types = in.readStringArray(); } @@ -93,7 +96,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalNamedWriteable(indexFilter); out.writeOptionalLong(nowInMillis); out.writeGenericMap(runtimeFields); - if (out.getVersion().onOrAfter(Version.V_8_2_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { out.writeStringArray(filters); out.writeStringArray(types); } @@ -271,4 +274,13 @@ public String getDescription() { return stringBuilder.toString(); } + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, "", parentTaskId, headers) { + @Override + public String getDescription() { + return FieldCapabilitiesRequest.this.getDescription(); + } + }; + } } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java index f1bbc144cb4a..44ab201eb368 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.core.Tuple; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -34,7 +34,7 @@ /** * Response for {@link FieldCapabilitiesRequest} requests. 
*/ -public class FieldCapabilitiesResponse extends ActionResponse implements ChunkedToXContent { +public class FieldCapabilitiesResponse extends ActionResponse implements ChunkedToXContentObject { private static final ParseField INDICES_FIELD = new ParseField("indices"); private static final ParseField FIELDS_FIELD = new ParseField("fields"); private static final ParseField FAILED_INDICES_FIELD = new ParseField("failed_indices"); @@ -152,7 +152,7 @@ private static void writeField(StreamOutput out, Map } @Override - public Iterator toXContentChunked() { + public Iterator toXContentChunked(ToXContent.Params params) { if (indexResponses.size() > 0) { throw new IllegalStateException("cannot serialize non-merged response"); } @@ -241,6 +241,9 @@ public int hashCode() { @Override public String toString() { + if (indexResponses.size() > 0) { + return "FieldCapabilitiesResponse{unmerged}"; + } return Strings.toString(this); } } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/IndexFieldCapabilities.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/IndexFieldCapabilities.java index 383f5f2a0d4f..9b3b7cd08c6c 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/IndexFieldCapabilities.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/IndexFieldCapabilities.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.fieldcaps; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -68,7 +68,7 @@ public class IndexFieldCapabilities implements Writeable { this.isMetadatafield = in.readBoolean(); this.isSearchable = in.readBoolean(); this.isAggregatable = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { this.isDimension = in.readBoolean(); this.metricType = in.readOptionalEnum(TimeSeriesParams.MetricType.class); } else { @@ -85,7 +85,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(isMetadatafield); out.writeBoolean(isSearchable); out.writeBoolean(isAggregatable); - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { out.writeBoolean(isDimension); out.writeOptionalEnum(metricType); } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/ResponseRewriter.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/ResponseRewriter.java index aad38a5bf48a..d7d923ec089a 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/ResponseRewriter.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/ResponseRewriter.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.fieldcaps; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import java.util.HashMap; import java.util.Map; @@ -24,12 +24,12 @@ final class ResponseRewriter { public static Map rewriteOldResponses( - Version version, + TransportVersion version, Map input, String[] filters, String[] allowedTypes ) { - if (version.onOrAfter(Version.V_8_2_0)) { + if (version.onOrAfter(TransportVersion.V_8_2_0)) { return input; // nothing needs to be done } Function transformer = buildTransformer(input, filters, allowedTypes); diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java 
b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java index c0dae35971cd..746c97b6e247 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java @@ -29,6 +29,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.search.SearchService; +import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterAware; @@ -93,6 +94,8 @@ protected void doExecute(Task task, FieldCapabilitiesRequest request, final Acti if (ccsCheckCompatibility) { checkCCSVersionCompatibility(request); } + assert task instanceof CancellableTask; + final CancellableTask fieldCapTask = (CancellableTask) task; // retrieve the initial timestamp in case the action is a cross cluster search long nowInMillis = request.nowInMillis() == null ? System.currentTimeMillis() : request.nowInMillis(); final ClusterState clusterState = clusterService.state(); @@ -129,7 +132,7 @@ protected void doExecute(Task task, FieldCapabilitiesRequest request, final Acti final FailureCollector indexFailures = new FailureCollector(); // One for each cluster including the local cluster final CountDown completionCounter = new CountDown(1 + remoteClusterIndices.size()); - final Runnable countDown = createResponseMerger(request, completionCounter, indexResponses, indexFailures, listener); + final Runnable countDown = createResponseMerger(request, fieldCapTask, completionCounter, indexResponses, indexFailures, listener); final RequestDispatcher requestDispatcher = new RequestDispatcher( clusterService, transportService, @@ -180,6 +183,7 @@ private static void checkIndexBlocks(ClusterState clusterState, String[] concret private Runnable createResponseMerger( FieldCapabilitiesRequest request, + CancellableTask task, CountDown completionCounter, Map indexResponses, FailureCollector indexFailures, @@ -193,7 +197,7 @@ private Runnable createResponseMerger( // fork off to the management pool for merging the responses as the operation can run for longer than is acceptable // on a transport thread in case of large numbers of indices and/or fields threadPool.executor(ThreadPool.Names.SEARCH_COORDINATION) - .submit(ActionRunnable.supply(listener, () -> merge(indexResponses, request, new ArrayList<>(failures)))); + .submit(ActionRunnable.supply(listener, () -> merge(indexResponses, task, request, new ArrayList<>(failures)))); } else { listener.onResponse( new FieldCapabilitiesResponse(new ArrayList<>(indexResponses.values()), new ArrayList<>(failures)) @@ -238,9 +242,11 @@ private static boolean hasSameMappingHash(FieldCapabilitiesIndexResponse r1, Fie private FieldCapabilitiesResponse merge( Map indexResponsesMap, + CancellableTask task, FieldCapabilitiesRequest request, List failures ) { + task.ensureNotCancelled(); final FieldCapabilitiesIndexResponse[] indexResponses = indexResponsesMap.values() .stream() .sorted(Comparator.comparing(FieldCapabilitiesIndexResponse::getIndexName)) @@ -261,6 +267,7 @@ private FieldCapabilitiesResponse merge( } } + task.ensureNotCancelled(); Map> responseMap = new HashMap<>(); for (Map.Entry> entry : responseMapBuilder.entrySet()) { Map typeMapBuilder = entry.getValue(); @@ -387,6 +394,7 @@ boolean isEmpty() { private class NodeTransportHandler implements 
TransportRequestHandler { @Override public void messageReceived(FieldCapabilitiesNodeRequest request, TransportChannel channel, Task task) throws Exception { + assert task instanceof CancellableTask; final ActionListener listener = new ChannelActionListener<>(channel, ACTION_NODE_NAME, request); ActionListener.completeWith(listener, () -> { final List allResponses = new ArrayList<>(); @@ -404,6 +412,7 @@ public void messageReceived(FieldCapabilitiesNodeRequest request, TransportChann for (ShardId shardId : shardIds) { try { final FieldCapabilitiesIndexResponse response = fetcher.fetch( + (CancellableTask) task, shardId, request.fields(), request.filters(), diff --git a/server/src/main/java/org/elasticsearch/action/get/GetRequest.java b/server/src/main/java/org/elasticsearch/action/get/GetRequest.java index 4f1a8d5f9d54..9ed35e303b4e 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.get; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.RealtimeRequest; import org.elasticsearch.action.ValidateActions; @@ -68,7 +68,7 @@ public GetRequest() {} GetRequest(StreamInput in) throws IOException { super(in); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { in.readString(); } id = in.readString(); @@ -81,7 +81,7 @@ public GetRequest() {} this.versionType = VersionType.fromValue(in.readByte()); this.version = in.readLong(); fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::readFrom); - if (in.getVersion().onOrAfter(Version.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { forceSyntheticSource = in.readBoolean(); } else { forceSyntheticSource = false; @@ -91,7 +91,7 @@ public GetRequest() {} @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); @@ -104,7 +104,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeByte(versionType.getValue()); out.writeLong(version); out.writeOptionalWriteable(fetchSourceContext); - if (out.getVersion().onOrAfter(Version.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { out.writeBoolean(forceSyntheticSource); } else { if (forceSyntheticSource) { diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java index 09f24eec5296..5774fc58d999 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.get; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; @@ -85,7 +85,7 @@ public Item() { public Item(StreamInput in) throws IOException { index = in.readString(); - if (in.getVersion().before(Version.V_8_0_0)) { + if 
(in.getTransportVersion().before(TransportVersion.V_8_0_0)) { in.readOptionalString(); } id = in.readString(); @@ -179,7 +179,7 @@ public Item fetchSourceContext(FetchSourceContext fetchSourceContext) { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeOptionalString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); @@ -263,7 +263,7 @@ public MultiGetRequest(StreamInput in) throws IOException { refresh = in.readBoolean(); realtime = in.readBoolean(); items = in.readList(Item::new); - if (in.getVersion().onOrAfter(Version.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { forceSyntheticSource = in.readBoolean(); } else { forceSyntheticSource = false; @@ -277,7 +277,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(refresh); out.writeBoolean(realtime); out.writeList(items); - if (out.getVersion().onOrAfter(Version.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { out.writeBoolean(forceSyntheticSource); } else { if (forceSyntheticSource) { diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java index eb979bc57855..b7c68a51ed60 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.get; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; @@ -57,7 +57,7 @@ public Failure(String index, String id, Exception exception) { Failure(StreamInput in) throws IOException { index = in.readString(); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { in.readOptionalString(); } id = in.readString(); @@ -88,7 +88,7 @@ public String getMessage() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeOptionalString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java index fc641c31512a..9b63fcc469a8 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.get; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.single.shard.SingleShardRequest; import org.elasticsearch.common.io.stream.StreamInput; @@ -62,7 +62,7 @@ public class MultiGetShardRequest extends SingleShardRequest> { public static final String NAME = "ingest_pipelines"; - private final IngestService ingestService; - private final FileSettingsService fileSettingsService; - /** * Creates a ReservedPipelineAction * - 
* @param ingestService requires {@link IngestService} for storing/deleting the pipelines - * @param fileSettingsService required for supplying the latest node infos */ - public ReservedPipelineAction(IngestService ingestService, FileSettingsService fileSettingsService) { - this.ingestService = ingestService; - this.fileSettingsService = fileSettingsService; - } + public ReservedPipelineAction() {} @Override public String name() { @@ -61,11 +51,9 @@ public String name() { private Collection prepare(List requests) { var exceptions = new ArrayList(); - NodesInfoResponse nodeInfos = fileSettingsService.nodeInfos(); - assert nodeInfos != null; for (var pipeline : requests) { try { - ingestService.validatePipelineRequest(pipeline, nodeInfos); + validate(pipeline); } catch (Exception e) { exceptions.add(e); } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java index 2a75ec3b7681..30d13ab93489 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineTransportAction.java @@ -8,22 +8,47 @@ package org.elasticsearch.action.ingest; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.ingest.IngestService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; +import java.util.Collection; import java.util.Map; +import java.util.Random; public class SimulatePipelineTransportAction extends HandledTransportAction { - + private static final Logger logger = LogManager.getLogger(SimulatePipelineTransportAction.class); + /** + * This is the amount of time given as the timeout for transport requests to the ingest node. 
+ */ + public static final Setting INGEST_NODE_TRANSPORT_ACTION_TIMEOUT = Setting.timeSetting( + "ingest_node.transport_action_timeout", + TimeValue.timeValueSeconds(20), + TimeValue.timeValueMillis(1), + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); private final IngestService ingestService; private final SimulateExecutionService executionService; + private final TransportService transportService; + private volatile TimeValue ingestNodeTransportActionTimeout; + // ThreadLocal because our unit testing framework does not like sharing Randoms across threads + private final ThreadLocal random = ThreadLocal.withInitial(Randomness::get); @Inject public SimulatePipelineTransportAction( @@ -35,30 +60,77 @@ public SimulatePipelineTransportAction( super(SimulatePipelineAction.NAME, transportService, actionFilters, SimulatePipelineRequest::new); this.ingestService = ingestService; this.executionService = new SimulateExecutionService(threadPool); + this.transportService = transportService; + this.ingestNodeTransportActionTimeout = INGEST_NODE_TRANSPORT_ACTION_TIMEOUT.get(ingestService.getClusterService().getSettings()); + ingestService.getClusterService() + .getClusterSettings() + .addSettingsUpdateConsumer( + INGEST_NODE_TRANSPORT_ACTION_TIMEOUT, + newTimeout -> this.ingestNodeTransportActionTimeout = newTimeout + ); } @Override protected void doExecute(Task task, SimulatePipelineRequest request, ActionListener listener) { final Map source = XContentHelper.convertToMap(request.getSource(), false, request.getXContentType()).v2(); - - final SimulatePipelineRequest.Parsed simulateRequest; + DiscoveryNodes discoveryNodes = ingestService.getClusterService().state().nodes(); + Map ingestNodes = discoveryNodes.getIngestNodes(); + if (ingestNodes.isEmpty()) { + /* + * Some resources used by pipelines, such as the geoip database, only exist on ingest nodes. Since we only run pipelines on + * nodes with the ingest role, we ought to only simulate a pipeline on nodes with the ingest role. 
+ */ + listener.onFailure( + new IllegalStateException("There are no ingest nodes in this cluster, unable to forward request to an ingest node.") + ); + return; + } try { - if (request.getId() != null) { - simulateRequest = SimulatePipelineRequest.parseWithPipelineId( - request.getId(), - source, - request.isVerbose(), - ingestService, - request.getRestApiVersion() - ); + if (discoveryNodes.getLocalNode().isIngestNode()) { + final SimulatePipelineRequest.Parsed simulateRequest; + if (request.getId() != null) { + simulateRequest = SimulatePipelineRequest.parseWithPipelineId( + request.getId(), + source, + request.isVerbose(), + ingestService, + request.getRestApiVersion() + ); + } else { + simulateRequest = SimulatePipelineRequest.parse( + source, + request.isVerbose(), + ingestService, + request.getRestApiVersion() + ); + } + executionService.execute(simulateRequest, listener); } else { - simulateRequest = SimulatePipelineRequest.parse(source, request.isVerbose(), ingestService, request.getRestApiVersion()); + DiscoveryNode ingestNode = getRandomIngestNode(ingestNodes.values()); + logger.trace("forwarding request [{}] to ingest node [{}]", actionName, ingestNode); + ActionListenerResponseHandler handler = new ActionListenerResponseHandler<>( + listener, + SimulatePipelineAction.INSTANCE.getResponseReader() + ); + if (task == null) { + transportService.sendRequest(ingestNode, actionName, request, handler); + } else { + transportService.sendChildRequest( + ingestNode, + actionName, + request, + task, + TransportRequestOptions.timeout(ingestNodeTransportActionTimeout), + handler + ); + } } } catch (Exception e) { listener.onFailure(e); - return; } + } - executionService.execute(simulateRequest, listener); + private DiscoveryNode getRandomIngestNode(Collection ingestNodes) { + return ingestNodes.toArray(new DiscoveryNode[0])[random.get().nextInt(ingestNodes.size())]; } } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java index a40ff5fa955f..0271afb9a9b7 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -159,7 +160,11 @@ public SimulateProcessorResult(String type, String processorTag, String descript * Read from a stream. 
*/ SimulateProcessorResult(StreamInput in) throws IOException { - this.processorTag = in.readString(); + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + this.processorTag = in.readOptionalString(); + } else { + this.processorTag = in.readString(); + } this.ingestDocument = in.readOptionalWriteable(WriteableIngestDocument::new); this.failure = in.readException(); this.description = in.readOptionalString(); @@ -174,7 +179,11 @@ public SimulateProcessorResult(String type, String processorTag, String descript @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(processorTag); + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + out.writeOptionalString(processorTag); + } else { + out.writeString(processorTag); + } out.writeOptionalWriteable(ingestDocument); out.writeException(failure); out.writeOptionalString(description); diff --git a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java index fa7bef5f5392..fda6393cd50a 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java @@ -111,7 +111,7 @@ protected void dispatchedShardOperationOnPrimary( ) { ActionListener.completeWith( listener, - () -> new WritePrimaryResult<>(performOnPrimary(request), new ResyncReplicationResponse(), null, null, primary, logger) + () -> new WritePrimaryResult<>(performOnPrimary(request), new ResyncReplicationResponse(), null, primary, logger) ); } diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index b5fc113ee6e7..0f2caa7bbe49 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -544,7 +544,7 @@ public final void onShardFailure(final int shardIndex, SearchShardTarget shardTa if (TransportActions.isShardNotAvailableException(e)) { // Groups shard not available exceptions under a generic exception that returns a SERVICE_UNAVAILABLE(503) // temporary error. 
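(Editorial aside, not part of the diff.) The hunks above migrate wire-format gates from release `Version` checks to `TransportVersion` checks, and `SimulateProcessorResult` additionally switches `processorTag` to an optional string on newer streams. Below is a minimal sketch of that reader/writer pattern under those assumptions; the class name `ExampleTaggedResult` and its `tag` field are invented for illustration, while the `StreamInput`/`StreamOutput`/`TransportVersion` calls are the ones used in the surrounding hunks.

import org.elasticsearch.TransportVersion;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;

final class ExampleTaggedResult implements Writeable { // hypothetical, for illustration only
    private final String tag; // may be null once both sides are on 8.7.0 or later

    ExampleTaggedResult(StreamInput in) throws IOException {
        // Readers branch on the transport version negotiated for this stream,
        // not on the release version of the remote node.
        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) {
            this.tag = in.readOptionalString();
        } else {
            this.tag = in.readString();
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // The writer must mirror the reader exactly, otherwise the stream is corrupted.
        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) {
            out.writeOptionalString(tag);
        } else {
            out.writeString(tag); // older peers expect a non-null string here, as in the diff
        }
    }
}

As in the hunks above, the fallback branch assumes the value is non-null when talking to an older peer.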
- e = new NoShardAvailableActionException(shardTarget.getShardId(), e.getMessage()); + e = NoShardAvailableActionException.forOnShardFailureWrapper(e.getMessage()); } // we don't aggregate shard on failures due to the internal cancellation, // but do keep the header counts right diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java index e811d838122b..8c5d828fff01 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.search; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.support.IndicesOptions; @@ -151,7 +151,7 @@ public CanMatchNodeRequest(StreamInput in) throws IOException { source = in.readOptionalWriteable(SearchSourceBuilder::new); indicesOptions = IndicesOptions.readIndicesOptions(in); searchType = SearchType.fromId(in.readByte()); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { // types no longer relevant so ignore String[] types = in.readStringArray(); if (types.length > 0) { @@ -177,7 +177,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(source); indicesOptions.writeIndicesOptions(out); out.writeByte(searchType.id()); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { // types not supported so send an empty array to previous versions out.writeStringArray(Strings.EMPTY_ARRAY); } diff --git a/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java b/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java index 1a0f0ad6621d..4c3e29fdda2f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java +++ b/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java @@ -10,11 +10,10 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.StepListener; -import org.elasticsearch.action.support.GroupedActionListener; +import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportResponse; @@ -33,7 +32,7 @@ public final class ClearScrollController implements Runnable { private final DiscoveryNodes nodes; private final SearchTransportService searchTransportService; - private final CountDown expectedOps; + private final RefCountingRunnable refs = new RefCountingRunnable(this::finish); private final ActionListener listener; private final AtomicBoolean hasFailed = new AtomicBoolean(false); private final AtomicInteger freedSearchContexts = new AtomicInteger(0); @@ -52,9 +51,7 @@ public final class ClearScrollController implements Runnable { this.searchTransportService = searchTransportService; this.listener = listener; List scrollIds = request.getScrollIds(); - final int expectedOps; if (scrollIds.size() == 1 && 
"_all".equals(scrollIds.get(0))) { - expectedOps = nodes.getSize(); runner = this::cleanAllScrolls; } else { // TODO: replace this with #closeContexts @@ -63,16 +60,8 @@ public final class ClearScrollController implements Runnable { SearchContextIdForNode[] context = parseScrollId(scrollId).getContext(); Collections.addAll(contexts, context); } - if (contexts.isEmpty()) { - expectedOps = 0; - runner = () -> listener.onResponse(new ClearScrollResponse(true, 0)); - } else { - expectedOps = contexts.size(); - runner = () -> cleanScrollIds(contexts); - } + runner = () -> cleanScrollIds(contexts); } - this.expectedOps = new CountDown(expectedOps); - } @Override @@ -81,44 +70,55 @@ public void run() { } void cleanAllScrolls() { - for (final DiscoveryNode node : nodes) { - try { - Transport.Connection connection = searchTransportService.getConnection(null, node); - searchTransportService.sendClearAllScrollContexts(connection, new ActionListener() { - @Override - public void onResponse(TransportResponse response) { - onFreedContext(true); - } + try { + for (final DiscoveryNode node : nodes) { + try { + Transport.Connection connection = searchTransportService.getConnection(null, node); + searchTransportService.sendClearAllScrollContexts(connection, ActionListener.releaseAfter(new ActionListener<>() { + @Override + public void onResponse(TransportResponse response) { + onFreedContext(true); + } - @Override - public void onFailure(Exception e) { - onFailedFreedContext(e, node); - } - }); - } catch (Exception e) { - onFailedFreedContext(e, node); + @Override + public void onFailure(Exception e) { + onFailedFreedContext(e, node); + } + }, refs.acquire())); + } catch (Exception e) { + onFailedFreedContext(e, node); + } } + } finally { + refs.close(); } } void cleanScrollIds(List contextIds) { SearchScrollAsyncAction.collectNodesAndRun(contextIds, nodes, searchTransportService, ActionListener.wrap(lookup -> { - for (SearchContextIdForNode target : contextIds) { - final DiscoveryNode node = lookup.apply(target.getClusterAlias(), target.getNode()); - if (node == null) { - onFreedContext(false); - } else { - try { - Transport.Connection connection = searchTransportService.getConnection(target.getClusterAlias(), node); - searchTransportService.sendFreeContext( - connection, - target.getSearchContextId(), - ActionListener.wrap(freed -> onFreedContext(freed.isFreed()), e -> onFailedFreedContext(e, node)) - ); - } catch (Exception e) { - onFailedFreedContext(e, node); + try { + for (SearchContextIdForNode target : contextIds) { + final DiscoveryNode node = lookup.apply(target.getClusterAlias(), target.getNode()); + if (node == null) { + onFreedContext(false); + } else { + try { + Transport.Connection connection = searchTransportService.getConnection(target.getClusterAlias(), node); + searchTransportService.sendFreeContext( + connection, + target.getSearchContextId(), + ActionListener.releaseAfter( + ActionListener.wrap(freed -> onFreedContext(freed.isFreed()), e -> onFailedFreedContext(e, node)), + refs.acquire() + ) + ); + } catch (Exception e) { + onFailedFreedContext(e, node); + } } } + } finally { + refs.close(); } }, listener::onFailure)); } @@ -127,22 +127,15 @@ private void onFreedContext(boolean freed) { if (freed) { freedSearchContexts.incrementAndGet(); } - if (expectedOps.countDown()) { - boolean succeeded = hasFailed.get() == false; - listener.onResponse(new ClearScrollResponse(succeeded, freedSearchContexts.get())); - } } private void onFailedFreedContext(Throwable e, DiscoveryNode node) { 
logger.warn(() -> "Clear SC failed on node[" + node + "]", e); - /* - * We have to set the failure marker before we count down otherwise we can expose the failure marker before we have set it to a - * racing thread successfully freeing a context. This would lead to that thread responding that the clear scroll succeeded. - */ hasFailed.set(true); - if (expectedOps.countDown()) { - listener.onResponse(new ClearScrollResponse(false, freedSearchContexts.get())); - } + } + + private void finish() { + listener.onResponse(new ClearScrollResponse(hasFailed.get() == false, freedSearchContexts.get())); } /** @@ -154,42 +147,39 @@ public static void closeContexts( Collection contextIds, ActionListener listener ) { - if (contextIds.isEmpty()) { - listener.onResponse(0); - return; - } final Set clusters = contextIds.stream() - .filter(ctx -> Strings.isEmpty(ctx.getClusterAlias()) == false) .map(SearchContextIdForNode::getClusterAlias) + .filter(clusterAlias -> Strings.isEmpty(clusterAlias) == false) .collect(Collectors.toSet()); final StepListener> lookupListener = new StepListener<>(); - if (clusters.isEmpty() == false) { - searchTransportService.getRemoteClusterService().collectNodes(clusters, lookupListener); - } else { + if (clusters.isEmpty()) { lookupListener.onResponse((cluster, nodeId) -> nodes.get(nodeId)); + } else { + searchTransportService.getRemoteClusterService().collectNodes(clusters, lookupListener); } - lookupListener.whenComplete(nodeLookup -> { - final GroupedActionListener groupedListener = new GroupedActionListener<>( - listener.map(rs -> Math.toIntExact(rs.stream().filter(r -> r).count())), - contextIds.size() - ); - for (SearchContextIdForNode contextId : contextIds) { - final DiscoveryNode node = nodeLookup.apply(contextId.getClusterAlias(), contextId.getNode()); - if (node == null) { - groupedListener.onResponse(false); - } else { - try { - final Transport.Connection connection = searchTransportService.getConnection(contextId.getClusterAlias(), node); - searchTransportService.sendFreeContext( - connection, - contextId.getSearchContextId(), - ActionListener.wrap(r -> groupedListener.onResponse(r.isFreed()), e -> groupedListener.onResponse(false)) - ); - } catch (Exception e) { - groupedListener.onResponse(false); + lookupListener.addListener(listener.delegateFailure((l, nodeLookup) -> { + final var successes = new AtomicInteger(); + try (RefCountingRunnable refs = new RefCountingRunnable(() -> l.onResponse(successes.get()))) { + for (SearchContextIdForNode contextId : contextIds) { + final DiscoveryNode node = nodeLookup.apply(contextId.getClusterAlias(), contextId.getNode()); + if (node != null) { + try { + searchTransportService.sendFreeContext( + searchTransportService.getConnection(contextId.getClusterAlias(), node), + contextId.getSearchContextId(), + refs.acquireListener().map(r -> { + if (r.isFreed()) { + successes.incrementAndGet(); + } + return null; + }) + ); + } catch (Exception e) { + // ignored + } } } } - }, listener::onFailure); + })); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index 7927779c70b9..df81ce606ffa 100644 --- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -38,7 +38,7 @@ final class DfsQueryPhase extends SearchPhase { private final QueryPhaseResultConsumer queryResult; private final List searchResults; private final AggregatedDfs 
dfs; - private final DfsKnnResults knnResults; + private final List knnResults; private final Function, SearchPhase> nextPhaseFactory; private final SearchPhaseContext context; private final SearchTransportService searchTransportService; @@ -47,7 +47,7 @@ final class DfsQueryPhase extends SearchPhase { DfsQueryPhase( List searchResults, AggregatedDfs dfs, - DfsKnnResults knnResults, + List knnResults, QueryPhaseResultConsumer queryResult, Function, SearchPhase> nextPhaseFactory, SearchPhaseContext context @@ -132,20 +132,37 @@ public void onFailure(Exception exception) { private ShardSearchRequest rewriteShardSearchRequest(ShardSearchRequest request) { SearchSourceBuilder source = request.source(); - if (source == null || source.knnSearch() == null) { + if (source == null || source.knnSearch().isEmpty()) { return request; } List scoreDocs = new ArrayList<>(); - for (ScoreDoc scoreDoc : knnResults.scoreDocs()) { - if (scoreDoc.shardIndex == request.shardRequestIndex()) { - scoreDocs.add(scoreDoc); + for (DfsKnnResults dfsKnnResults : knnResults) { + for (ScoreDoc scoreDoc : dfsKnnResults.scoreDocs()) { + if (scoreDoc.shardIndex == request.shardRequestIndex()) { + scoreDocs.add(scoreDoc); + } } } scoreDocs.sort(Comparator.comparingInt(scoreDoc -> scoreDoc.doc)); + // It is possible that the different results refer to the same doc. + for (int i = 0; i < scoreDocs.size() - 1; i++) { + ScoreDoc scoreDoc = scoreDocs.get(i); + int j = i + 1; + for (; j < scoreDocs.size(); j++) { + ScoreDoc otherScoreDoc = scoreDocs.get(j); + if (otherScoreDoc.doc != scoreDoc.doc) { + break; + } + scoreDoc.score += otherScoreDoc.score; + } + if (j > i + 1) { + scoreDocs.subList(i + 1, j).clear(); + } + } KnnScoreDocQueryBuilder knnQuery = new KnnScoreDocQueryBuilder(scoreDocs.toArray(new ScoreDoc[0])); - SearchSourceBuilder newSource = source.shallowCopy().knnSearch(null); + SearchSourceBuilder newSource = source.shallowCopy().knnSearch(List.of()); if (source.query() == null) { newSource.query(knnQuery); } else { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index 9e04c26b42eb..662b72e114a4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -87,7 +87,7 @@ protected void executePhaseOnShard( protected SearchPhase getNextPhase(final SearchPhaseResults results, SearchPhaseContext context) { final List dfsSearchResults = results.getAtomicArray().asList(); final AggregatedDfs aggregatedDfs = SearchPhaseController.aggregateDfs(dfsSearchResults); - final DfsKnnResults mergedKnnResults = SearchPhaseController.mergeKnnResults(getRequest(), dfsSearchResults); + final List mergedKnnResults = SearchPhaseController.mergeKnnResults(getRequest(), dfsSearchResults); return new DfsQueryPhase( dfsSearchResults, diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 7dc1ddf0bba0..087957368bb1 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -46,7 +46,6 @@ import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.search.suggest.Suggest.Suggestion; import 
org.elasticsearch.search.suggest.completion.CompletionSuggestion; -import org.elasticsearch.search.vectors.KnnSearchBuilder; import java.util.ArrayList; import java.util.Collection; @@ -128,26 +127,34 @@ public static AggregatedDfs aggregateDfs(Collection results) { return new AggregatedDfs(termStatistics, fieldStatistics, aggMaxDoc); } - public static DfsKnnResults mergeKnnResults(SearchRequest request, List dfsSearchResults) { + public static List mergeKnnResults(SearchRequest request, List dfsSearchResults) { if (request.hasKnnSearch() == false) { return null; } - List topDocs = new ArrayList<>(); + List> topDocsLists = new ArrayList<>(request.source().knnSearch().size()); + for (int i = 0; i < request.source().knnSearch().size(); i++) { + topDocsLists.add(new ArrayList<>()); + } + for (DfsSearchResult dfsSearchResult : dfsSearchResults) { if (dfsSearchResult.knnResults() != null) { - ScoreDoc[] scoreDocs = dfsSearchResult.knnResults().scoreDocs(); - TotalHits totalHits = new TotalHits(scoreDocs.length, Relation.EQUAL_TO); - - TopDocs shardTopDocs = new TopDocs(totalHits, scoreDocs); - setShardIndex(shardTopDocs, dfsSearchResult.getShardIndex()); - topDocs.add(shardTopDocs); + for (int i = 0; i < dfsSearchResult.knnResults().size(); i++) { + DfsKnnResults knnResults = dfsSearchResult.knnResults().get(i); + ScoreDoc[] scoreDocs = knnResults.scoreDocs(); + TotalHits totalHits = new TotalHits(scoreDocs.length, Relation.EQUAL_TO); + TopDocs shardTopDocs = new TopDocs(totalHits, scoreDocs); + setShardIndex(shardTopDocs, dfsSearchResult.getShardIndex()); + topDocsLists.get(i).add(shardTopDocs); + } } } - - KnnSearchBuilder knnSearch = request.source().knnSearch(); - TopDocs mergedTopDocs = TopDocs.merge(knnSearch.k(), topDocs.toArray(new TopDocs[0])); - return new DfsKnnResults(mergedTopDocs.scoreDocs); + List mergedResults = new ArrayList<>(request.source().knnSearch().size()); + for (int i = 0; i < request.source().knnSearch().size(); i++) { + TopDocs mergedTopDocs = TopDocs.merge(request.source().knnSearch().get(i).k(), topDocsLists.get(i).toArray(new TopDocs[0])); + mergedResults.add(new DfsKnnResults(mergedTopDocs.scoreDocs)); + } + return mergedResults; } /** diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 6eb1b8adc7ad..412f90db7908 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.search; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; @@ -238,7 +239,7 @@ public SearchRequest(StreamInput in) throws IOException { preference = in.readOptionalString(); scroll = in.readOptionalWriteable(Scroll::new); source = in.readOptionalWriteable(SearchSourceBuilder::new); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { // types no longer relevant so ignore String[] types = in.readStringArray(); if (types.length > 0) { @@ -262,16 +263,16 @@ public SearchRequest(StreamInput in) throws IOException { finalReduce = true; } ccsMinimizeRoundtrips = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_7_12_0) && in.readBoolean()) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_12_0) && in.readBoolean()) { 
minCompatibleShardNode = Version.readVersion(in); } else { minCompatibleShardNode = null; } - if (in.getVersion().onOrAfter(Version.V_7_16_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { waitForCheckpoints = in.readMap(StreamInput::readString, StreamInput::readLongArray); waitForCheckpointsTimeout = in.readTimeValue(); } - if (in.getVersion().onOrAfter(Version.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { forceSyntheticSource = in.readBoolean(); } else { forceSyntheticSource = false; @@ -287,7 +288,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(preference); out.writeOptionalWriteable(scroll); out.writeOptionalWriteable(source); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { // types not supported so send an empty array to previous versions out.writeStringArray(Strings.EMPTY_ARRAY); } @@ -303,27 +304,27 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(finalReduce); } out.writeBoolean(ccsMinimizeRoundtrips); - if (out.getVersion().onOrAfter(Version.V_7_12_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_12_0)) { out.writeBoolean(minCompatibleShardNode != null); if (minCompatibleShardNode != null) { Version.writeVersion(minCompatibleShardNode, out); } } - Version waitForCheckpointsVersion = Version.V_7_16_0; - if (out.getVersion().onOrAfter(waitForCheckpointsVersion)) { + TransportVersion waitForCheckpointsVersion = TransportVersion.V_7_16_0; + if (out.getTransportVersion().onOrAfter(waitForCheckpointsVersion)) { out.writeMap(waitForCheckpoints, StreamOutput::writeString, StreamOutput::writeLongArray); out.writeTimeValue(waitForCheckpointsTimeout); } else if (waitForCheckpoints.isEmpty() == false) { throw new IllegalArgumentException( - "Remote node version [" - + out.getVersion() + "Remote transport version [" + + out.getTransportVersion() + " incompatible with " - + "wait_for_checkpoints. All nodes must be version [" + + "wait_for_checkpoints. All nodes must use transport version [" + waitForCheckpointsVersion + "] or greater." ); } - if (out.getVersion().onOrAfter(Version.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { out.writeBoolean(forceSyntheticSource); } else { if (forceSyntheticSource) { @@ -743,7 +744,7 @@ public boolean isSuggestOnly() { * @return true if the request contains kNN search */ public boolean hasKnnSearch() { - return source != null && source.knnSearch() != null; + return source != null && source.knnSearch().isEmpty() == false; } public int resolveTrackTotalHitsUpTo() { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 79a2d0d811ec..f40e8ccb84ec 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -177,7 +177,7 @@ public SearchRequestBuilder setPostFilter(QueryBuilder postFilter) { * Defines a kNN search. If a query is also provided, the kNN hits * are combined with the query hits. 
*/ - public SearchRequestBuilder setKnnSearch(KnnSearchBuilder knnSearch) { + public SearchRequestBuilder setKnnSearch(List knnSearch) { sourceBuilder().knnSearch(knnSearch); return this; } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index bfef22dd12ec..5f42c1543da8 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.search; import org.apache.lucene.search.TotalHits; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -84,7 +84,7 @@ public SearchResponse(StreamInput in) throws IOException { scrollId = in.readOptionalString(); tookInMillis = in.readVLong(); skippedShards = in.readVInt(); - if (in.getVersion().onOrAfter(Version.V_7_10_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { pointInTimeId = in.readOptionalString(); } else { pointInTimeId = null; @@ -443,7 +443,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(scrollId); out.writeVLong(tookInMillis); out.writeVInt(skippedShards); - if (out.getVersion().onOrAfter(Version.V_7_10_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { out.writeOptionalString(pointInTimeId); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchShardIterator.java b/server/src/main/java/org/elasticsearch/action/search/SearchShardIterator.java index ed185577c84c..2e3472d08227 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchShardIterator.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchShardIterator.java @@ -50,7 +50,14 @@ public final class SearchShardIterator implements Comparable shards, OriginalIndices originalIndices) { - this(clusterAlias, shardId, shards.stream().map(ShardRouting::currentNodeId).toList(), originalIndices, null, null); + this( + clusterAlias, + shardId, + shards.stream().filter(ShardRouting::isSearchable).map(ShardRouting::currentNodeId).toList(), + originalIndices, + null, + null + ); } public SearchShardIterator( @@ -61,6 +68,7 @@ public SearchShardIterator( ShardSearchContextId searchContextId, TimeValue searchContextKeepAlive ) { + // TODO ensure all target nodes hold shards with a searchable ShardRoutingRole - at the moment, PIT searches don't check this this.shardId = shardId; this.targetNodesIterator = new PlainIterator<>(targetNodeIds); this.originalIndices = originalIndices; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index ccf58bfeadd6..92f30eef2bd8 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.search; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; @@ -149,7 +150,8 @@ public void sendCanMatch( SearchTask task, final ActionListener listener ) { - if 
(connection.getVersion().onOrAfter(Version.V_7_16_0) && connection.getNode().getVersion().onOrAfter(Version.V_7_16_0)) { + if (connection.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0) + && connection.getNode().getVersion().onOrAfter(Version.V_7_16_0)) { transportService.sendChildRequest( connection, QUERY_CAN_MATCH_NODE_NAME, diff --git a/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java b/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java index 8da88798e488..5fad89a24118 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java +++ b/server/src/main/java/org/elasticsearch/action/search/ShardSearchFailure.java @@ -76,6 +76,7 @@ public ShardSearchFailure(Exception e, @Nullable SearchShardTarget shardTarget) /** * The search shard target the failure occurred on. + * @return The shardTarget, may be null */ @Nullable public SearchShardTarget shard() { @@ -95,7 +96,6 @@ public String toString() { public static ShardSearchFailure readShardSearchFailure(StreamInput in) throws IOException { return new ShardSearchFailure(in); - } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java index 648f8e6c8ee0..a368cecd2677 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java @@ -108,7 +108,7 @@ protected void doExecute(Task task, MultiSearchRequest request, ActionListener= 1; + return activeShardCount == shardRoutingTable.size(); + } else if (value == 0) { + return true; + } else if (value == 1) { + return shardRoutingTable.hasSearchShards() ? shardRoutingTable.getActiveSearchShardCount() >= 1 : activeShardCount >= 1; } else { - return activeShardCount >= value; + return shardRoutingTable.getActiveSearchShardCount() >= value; } } @@ -189,5 +188,4 @@ public String toString() { default -> Integer.toString(value); }; } - } diff --git a/server/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java b/server/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java deleted file mode 100644 index 7f1ada5d6246..000000000000 --- a/server/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.support; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ActionFuture; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.util.concurrent.BaseFuture; -import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException; -import org.elasticsearch.core.TimeValue; - -import java.util.concurrent.TimeUnit; - -public abstract class AdapterActionFuture extends BaseFuture implements ActionFuture, ActionListener { - - @Override - public T actionGet() { - try { - return FutureUtils.get(this); - } catch (ElasticsearchException e) { - throw unwrapEsException(e); - } - } - - @Override - public T actionGet(String timeout) { - return actionGet(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".actionGet.timeout")); - } - - @Override - public T actionGet(long timeoutMillis) { - return actionGet(timeoutMillis, TimeUnit.MILLISECONDS); - } - - @Override - public T actionGet(TimeValue timeout) { - return actionGet(timeout.millis(), TimeUnit.MILLISECONDS); - } - - @Override - public T actionGet(long timeout, TimeUnit unit) { - try { - return FutureUtils.get(this, timeout, unit); - } catch (ElasticsearchException e) { - throw unwrapEsException(e); - } - } - - @Override - public void onResponse(L result) { - set(convert(result)); - } - - @Override - public void onFailure(Exception e) { - setException(e); - } - - protected abstract T convert(L listenerResponse); - - private static RuntimeException unwrapEsException(ElasticsearchException esEx) { - Throwable root = esEx.unwrapCause(); - if (root instanceof RuntimeException) { - return (RuntimeException) root; - } - return new UncategorizedExecutionException("Failed execution", root); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java b/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java index 53674d8ac4ba..7e778f8a5fd8 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java @@ -35,11 +35,7 @@ public ChannelActionListener(TransportChannel channel, String actionName, Reques @Override public void onResponse(Response response) { - try { - channel.sendResponse(response); - } catch (Exception e) { - onFailure(e); - } + ActionListener.run(this, l -> l.channel.sendResponse(response)); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/CountDownActionListener.java b/server/src/main/java/org/elasticsearch/action/support/CountDownActionListener.java new file mode 100644 index 000000000000..2dac0f4c8cb5 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/support/CountDownActionListener.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.action.support; + +import org.elasticsearch.action.ActionListener; + +import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +/** + * Wraps another listener and adds a counter -- each invocation of this listener will decrement the counter, and when the counter has been + * exhausted the final invocation of this listener will delegate to the wrapped listener. Similar to {@link GroupedActionListener}, but for + * the cases where tracking individual results is not useful. + */ +public final class CountDownActionListener extends ActionListener.Delegating { + + private final AtomicInteger countDown; + private final AtomicReference failure = new AtomicReference<>(); + + /** + * Creates a new listener + * @param groupSize the group size + * @param delegate the delegate listener + */ + public CountDownActionListener(int groupSize, ActionListener delegate) { + super(Objects.requireNonNull(delegate)); + if (groupSize <= 0) { + assert false : "illegal group size [" + groupSize + "]"; + throw new IllegalArgumentException("groupSize must be greater than 0 but was " + groupSize); + } + countDown = new AtomicInteger(groupSize); + } + + private boolean countDown() { + final var result = countDown.getAndUpdate(current -> Math.max(0, current - 1)); + assert result > 0; + return result == 1; + } + + @Override + public void onResponse(Void element) { + if (countDown()) { + if (failure.get() != null) { + super.onFailure(failure.get()); + } else { + delegate.onResponse(element); + } + } + } + + @Override + public void onFailure(Exception e) { + if (failure.compareAndSet(null, e) == false) { + failure.accumulateAndGet(e, (current, update) -> { + // we have to avoid self-suppression! + if (update != current) { + current.addSuppressed(update); + } + return current; + }); + } + if (countDown()) { + super.onFailure(failure.get()); + } + } + +} diff --git a/server/src/main/java/org/elasticsearch/action/support/GroupedActionListener.java b/server/src/main/java/org/elasticsearch/action/support/GroupedActionListener.java index 7a65f100456a..b3d7e74bf88a 100644 --- a/server/src/main/java/org/elasticsearch/action/support/GroupedActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/GroupedActionListener.java @@ -31,10 +31,10 @@ public final class GroupedActionListener extends ActionListener.Delegating> delegate, int groupSize) { + public GroupedActionListener(int groupSize, ActionListener> delegate) { super(delegate); if (groupSize <= 0) { assert false : "illegal group size [" + groupSize + "]"; diff --git a/server/src/main/java/org/elasticsearch/action/support/ListenableActionFuture.java b/server/src/main/java/org/elasticsearch/action/support/ListenableActionFuture.java index bbac7cba72b4..fcb079ef1fd8 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ListenableActionFuture.java +++ b/server/src/main/java/org/elasticsearch/action/support/ListenableActionFuture.java @@ -17,7 +17,7 @@ * A {@code Future} and {@link ActionListener} against which other {@link ActionListener}s can be registered later, to support * fanning-out a result to a dynamic collection of listeners. 
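(Editorial aside, not part of the diff.) A short usage sketch for the new `CountDownActionListener` introduced above. The fan-out class and the `flushShard` callback are hypothetical, but the constructor shape, `(int groupSize, ActionListener<Void> delegate)`, and the positive-group-size requirement match the class as added in this change.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.CountDownActionListener;

import java.util.List;

class ExampleFanOut { // hypothetical, for illustration only
    // Completes `listener` once every per-item callback has fired; if any callback
    // failed, the accumulated failure is delivered instead of a response.
    void flushAll(List<String> shards, ActionListener<Void> listener) {
        if (shards.isEmpty()) {
            listener.onResponse(null); // the counting listener requires a positive group size
            return;
        }
        ActionListener<Void> countDown = new CountDownActionListener(shards.size(), listener);
        for (String shard : shards) {
            flushShard(shard, countDown);
        }
    }

    // Hypothetical asynchronous per-shard operation; completes its listener when done.
    void flushShard(String shard, ActionListener<Void> listener) {
        listener.onResponse(null);
    }
}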
*/ -public class ListenableActionFuture extends AdapterActionFuture { +public class ListenableActionFuture extends PlainActionFuture { private Object listeners; private boolean executedListeners = false; @@ -77,19 +77,10 @@ protected void done(boolean success) { } } - @Override - protected T convert(T listenerResponse) { - return listenerResponse; - } - private void executeListener(final ActionListener listener) { - try { - // we use a timeout of 0 to by pass assertion forbidding to call actionGet() (blocking) on a network thread. - // here we know we will never block - listener.onResponse(actionGet(0)); - } catch (Exception e) { - listener.onFailure(e); - } + // we use a timeout of 0 to by pass assertion forbidding to call actionGet() (blocking) on a network thread. + // here we know we will never block + ActionListener.completeWith(listener, () -> actionGet(0)); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/NodeResponseTracker.java b/server/src/main/java/org/elasticsearch/action/support/NodeResponseTracker.java deleted file mode 100644 index aafd6166cb36..000000000000 --- a/server/src/main/java/org/elasticsearch/action/support/NodeResponseTracker.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.support; - -import java.util.Collection; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReferenceArray; - -/** - * This class tracks the intermediate responses that will be used to create aggregated cluster response to a request. It also gives the - * possibility to discard the intermediate results when asked, for example when the initial request is cancelled, in order to release the - * resources. - */ -public class NodeResponseTracker { - - private final AtomicInteger counter = new AtomicInteger(); - private final int expectedResponsesCount; - private volatile AtomicReferenceArray responses; - private volatile Exception causeOfDiscarding; - - public NodeResponseTracker(int size) { - this.expectedResponsesCount = size; - this.responses = new AtomicReferenceArray<>(size); - } - - public NodeResponseTracker(Collection array) { - this.expectedResponsesCount = array.size(); - this.responses = new AtomicReferenceArray<>(array.toArray()); - } - - /** - * This method discards the results collected so far to free up the resources. - * @param cause the discarding, this will be communicated if they try to access the discarded results - */ - public void discardIntermediateResponses(Exception cause) { - if (responses != null) { - this.causeOfDiscarding = cause; - responses = null; - } - } - - public boolean responsesDiscarded() { - return responses == null; - } - - /** - * This method stores a new node response if the intermediate responses haven't been discarded yet. If the responses are not discarded - * the method asserts that this is the first response encountered from this node to protect from miscounting the responses in case of a - * double invocation. If the responses have been discarded we accept this risk for simplicity. 
- * @param nodeIndex, the index that represents a single node of the cluster - * @param response, a response can be either a NodeResponse or an error - * @return true if all the nodes' responses have been received, else false - */ - public boolean trackResponseAndCheckIfLast(int nodeIndex, Object response) { - AtomicReferenceArray responses = this.responses; - - if (responsesDiscarded() == false) { - boolean firstEncounter = responses.compareAndSet(nodeIndex, null, response); - assert firstEncounter : "a response should be tracked only once"; - } - return counter.incrementAndGet() == getExpectedResponseCount(); - } - - /** - * Returns the tracked response or null if the response hasn't been received yet for a specific index that represents a node of the - * cluster. - * @throws DiscardedResponsesException if the responses have been discarded - */ - public Object getResponse(int nodeIndex) throws DiscardedResponsesException { - AtomicReferenceArray responses = this.responses; - if (responsesDiscarded()) { - throw new DiscardedResponsesException(causeOfDiscarding); - } - return responses.get(nodeIndex); - } - - public int getExpectedResponseCount() { - return expectedResponsesCount; - } - - /** - * This exception is thrown when the {@link NodeResponseTracker} is asked to give information about the responses after they have been - * discarded. - */ - public static class DiscardedResponsesException extends Exception { - - public DiscardedResponsesException(Exception cause) { - super(cause); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java b/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java index 20483b8ff440..4239608ea3d7 100644 --- a/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java +++ b/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java @@ -8,16 +8,74 @@ package org.elasticsearch.action.support; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.util.concurrent.BaseFuture; +import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException; import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.core.TimeValue; import java.util.concurrent.TimeUnit; -public class PlainActionFuture extends AdapterActionFuture { +public class PlainActionFuture extends BaseFuture implements ActionFuture, ActionListener { public static PlainActionFuture newFuture() { return new PlainActionFuture<>(); } + @Override + public T actionGet() { + try { + return FutureUtils.get(this); + } catch (ElasticsearchException e) { + throw unwrapEsException(e); + } + } + + @Override + public T actionGet(String timeout) { + return actionGet(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".actionGet.timeout")); + } + + @Override + public T actionGet(long timeoutMillis) { + return actionGet(timeoutMillis, TimeUnit.MILLISECONDS); + } + + @Override + public T actionGet(TimeValue timeout) { + return actionGet(timeout.millis(), TimeUnit.MILLISECONDS); + } + + @Override + public T actionGet(long timeout, TimeUnit unit) { + try { + return FutureUtils.get(this, timeout, unit); + } catch (ElasticsearchException e) { + throw unwrapEsException(e); + } + } + + @Override + public void onResponse(T result) { + set(result); + } + + @Override + public void onFailure(Exception e) 
{ + setException(e); + } + + private static RuntimeException unwrapEsException(ElasticsearchException esEx) { + Throwable root = esEx.unwrapCause(); + if (root instanceof RuntimeException) { + return (RuntimeException) root; + } + return new UncategorizedExecutionException("Failed execution", root); + } + public static T get(CheckedConsumer, E> e) throws E { PlainActionFuture fut = newFuture(); e.accept(fut); @@ -29,9 +87,4 @@ public static T get(CheckedConsumer + * try (var refs = new RefCountingListener(finalListener)) { + * for (var item : collection) { + * runAsyncAction(item, refs.acquire()); // completes the acquired listener on completion + * } + * } + * + * + * The delegate listener is completed when execution leaves the try-with-resources block and every acquired reference is released. The + * {@link RefCountingListener} collects (a bounded number of) exceptions received by its subsidiary listeners, and completes the delegate + * listener with an exception if (and only if) any subsidiary listener fails. However, unlike a {@link GroupedActionListener} it leaves it + * to the caller to collect the results of successful completions by accumulating them in a data structure of its choice. Also unlike a + * {@link GroupedActionListener} there is no need to declare the number of subsidiary listeners up front: listeners can be acquired + * dynamically as needed. Finally, you can continue to acquire additional listeners even outside the try-with-resources block, perhaps in a + * separate thread, as long as there's at least one listener outstanding: + * + *
+ * try (var refs = new RefCountingListener(finalListener)) {
+ *     for (var item : collection) {
+ *         if (condition(item)) {
+ *             runAsyncAction(item, refs.acquire(results::add));
+ *         }
+ *     }
+ *     if (flag) {
+ *         runOneOffAsyncAction(refs.acquire(results::add));
+ *         return;
+ *     }
+ *     for (var item : otherCollection) {
+ *         var itemRef = refs.acquire(); // delays completion while the background action is pending
+ *         executorService.execute(() -> {
+ *             try {
+ *                 if (condition(item)) {
+ *                     runOtherAsyncAction(item, refs.acquire(results::add));
+ *                 }
+ *             } finally {
+ *                 itemRef.onResponse(null);
+ *             }
+ *         });
+ *     }
+ * }
+ * </pre>
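The new CountDownActionListener earlier in this diff expresses the same fan-in idea for the simpler case where the number of subtasks is known up front and no per-item results are kept, but its Javadoc ships without a usage example. A minimal sketch under assumed names (shardCount, shards, flushShardAsync and finalListener are illustrative, not identifiers from this patch):

    // finalListener is an ActionListener<Void>; it completes once all shardCount notifications arrive,
    // failing with the first recorded exception (others suppressed) if any subtask failed.
    ActionListener<Void> countDown = new CountDownActionListener(shardCount, finalListener);
    for (ShardRouting shard : shards) {
        flushShardAsync(shard, countDown); // each flush completes the shared listener exactly once
    }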
+ * + * In particular (and also unlike a {@link GroupedActionListener}) this works even if you don't acquire any extra refs at all: in that case, + * the delegate listener is completed at the end of the try-with-resources block. + */ +public final class RefCountingListener implements Releasable { + + private final ActionListener delegate; + private final RefCountingRunnable refs = new RefCountingRunnable(this::finish); + + private final AtomicReference exceptionRef = new AtomicReference<>(); + private final Semaphore exceptionPermits; + private final AtomicInteger droppedExceptionsRef = new AtomicInteger(); + + /** + * Construct a {@link RefCountingListener} which completes {@code delegate} when all refs are released. + * @param delegate The listener to complete when all refs are released. This listener must not throw any exception on completion. If all + * the acquired listeners completed successfully then so is the delegate. If any of the acquired listeners completed + * with failure then the delegate is completed with the first exception received, with other exceptions added to its + * collection of suppressed exceptions. + */ + public RefCountingListener(ActionListener delegate) { + this(10, delegate); + } + + /** + * Construct a {@link RefCountingListener} which completes {@code delegate} when all refs are released. + * @param delegate The listener to complete when all refs are released. This listener must not throw any exception on completion. If all + * the acquired listeners completed successfully then so is the delegate. If any of the acquired listeners completed + * with failure then the delegate is completed with the first exception received, with other exceptions added to its + * collection of suppressed exceptions. + * @param maxExceptions The maximum number of exceptions to accumulate on failure. + */ + public RefCountingListener(int maxExceptions, ActionListener delegate) { + if (maxExceptions <= 0) { + assert false : maxExceptions; + throw new IllegalArgumentException("maxExceptions must be positive"); + } + this.delegate = ActionListener.assertOnce(Objects.requireNonNull(delegate)); + this.exceptionPermits = new Semaphore(maxExceptions); + } + + /** + * Release the original reference to this object, which commpletes the delegate {@link ActionListener} if there are no other references. + * + * It is invalid to call this method more than once. Doing so will trip an assertion if assertions are enabled, but will be ignored + * otherwise. This deviates from the contract of {@link java.io.Closeable}. + */ + @Override + public void close() { + refs.close(); + } + + private void finish() { + try { + var exception = exceptionRef.get(); + if (exception == null) { + delegate.onResponse(null); + } else { + final var droppedExceptions = droppedExceptionsRef.getAndSet(0); + if (droppedExceptions > 0) { + exception.addSuppressed(new ElasticsearchException(droppedExceptions + " further exceptions were dropped")); + } + delegate.onFailure(exception); + } + } catch (Exception e) { + assert false : e; + throw e; + } + } + + /** + * Acquire a reference to this object and return a listener which releases it. The delegate {@link ActionListener} is called when all + * its references have been released. + * + * It is invalid to call this method once all references are released. Doing so will trip an assertion if assertions are enabled, and + * will throw an {@link IllegalStateException} otherwise. + * + * It is also invalid to complete the returned listener more than once. 
Doing so will trip an assertion if assertions are enabled, but + * will be ignored otherwise. + */ + public ActionListener acquire() { + return new ActionListener<>() { + private final Releasable ref = refs.acquire(); + + @Override + public void onResponse(Void unused) { + ref.close(); + } + + @Override + public void onFailure(Exception e) { + try (ref) { + addException(e); + } + } + + @Override + public String toString() { + return RefCountingListener.this.toString(); + } + }; + } + + /** + * Acquire a reference to this object and return a listener which consumes a response and releases the reference. The delegate {@link + * ActionListener} is called when all its references have been released. The consumer must not throw any exception. + * + * It is invalid to call this method once all references are released. Doing so will trip an assertion if assertions are enabled, and + * will throw an {@link IllegalStateException} otherwise. + * + * It is also invalid to complete the returned listener more than once. Doing so will trip an assertion if assertions are enabled, but + * will be ignored otherwise. + */ + public ActionListener acquire(Consumer consumer) { + final var ref = refs.acquire(); + final var consumerRef = new AtomicReference<>(Objects.requireNonNull(consumer)); + return new ActionListener<>() { + @Override + public void onResponse(Response response) { + try (ref) { + var acquiredConsumer = consumerRef.getAndSet(null); + if (acquiredConsumer == null) { + assert false : "already closed"; + } else { + acquiredConsumer.accept(response); + } + } catch (Exception e) { + assert false : e; + throw e; + } + } + + @Override + public void onFailure(Exception e) { + try (ref) { + var acquiredConsumer = consumerRef.getAndSet(null); + assert acquiredConsumer != null : "already closed"; + addException(e); + } + } + + @Override + public String toString() { + return RefCountingListener.this + "[" + consumerRef.get() + "]"; + } + }; + } + + private void addException(Exception e) { + if (exceptionPermits.tryAcquire()) { + final var firstException = exceptionRef.compareAndExchange(null, e); + if (firstException != null && firstException != e) { + firstException.addSuppressed(e); + } + } else { + droppedExceptionsRef.incrementAndGet(); + } + } + + @Override + public String toString() { + return "refCounting[" + delegate + "]"; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/support/RefCountingRunnable.java b/server/src/main/java/org/elasticsearch/action/support/RefCountingRunnable.java new file mode 100644 index 000000000000..c3a5bb398972 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/support/RefCountingRunnable.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.support; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasable; + +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * A mechanism to trigger an action on the completion of some (dynamic) collection of other actions. Basic usage is as follows: + * + *
+ * try (var refs = new RefCountingRunnable(finalRunnable)) {
+ *     for (var item : collection) {
+ *         runAsyncAction(item, refs.acquire()); // releases the acquired ref on completion
+ *     }
+ * }
+ * </pre>
+ *
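For callees that expect an ActionListener rather than a Releasable, the class also offers acquireListener() (declared further down in this file), which wraps an acquired ref via ActionListener.releasing. A small sketch of that bridge, with closeShardAsync as an illustrative callee rather than a method from this patch:

    try (var refs = new RefCountingRunnable(finalRunnable)) {
        for (var shard : shards) {
            // the ref is released when the listener is completed, whether successfully or with failure
            closeShardAsync(shard, refs.acquireListener());
        }
    }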
+ * + * The delegate action is completed when execution leaves the try-with-resources block and every acquired reference is released. Unlike a + * {@link CountDown} there is no need to declare the number of subsidiary actions up front (refs can be acquired dynamically as needed) nor + * does the caller need to check for completion each time a reference is released. Moreover even outside the try-with-resources block you + * can continue to acquire additional references, even in a separate thread, as long as there's at least one reference outstanding: + * + *
+ * try (var refs = new RefCountingRunnable(finalRunnable)) {
+ *     for (var item : collection) {
+ *         if (condition(item)) {
+ *             runAsyncAction(item, refs.acquire());
+ *         }
+ *     }
+ *     if (flag) {
+ *         runOneOffAsyncAction(refs.acquire());
+ *         return;
+ *     }
+ *     for (var item : otherCollection) {
+ *         var itemRef = refs.acquire(); // delays completion while the background action is pending
+ *         executorService.execute(() -> {
+ *             try (var ignored = itemRef) {
+ *                 if (condition(item)) {
+ *                     runOtherAsyncAction(item, refs.acquire());
+ *                 }
+ *             }
+ *         });
+ *     }
+ * }
+ * </pre>
+ *
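The rewritten TransportBroadcastByNodeAction later in this diff uses exactly this shape for its per-node fan-out, pairing each outgoing request with ActionListener.releaseAfter(listener, refs.acquire()) so the aggregated response is only built after the last node has answered. A condensed sketch of that combination (buildResponse, perNodeListener and sendNodeRequest are illustrative stand-ins for the patch's real plumbing):

    try (var refs = new RefCountingRunnable(() -> listener.onResponse(buildResponse()))) {
        for (DiscoveryNode node : nodes) {
            // each node's handler releases one ref; the final release runs the delegate runnable above
            sendNodeRequest(node, ActionListener.releaseAfter(perNodeListener(node), refs.acquire()));
        }
    }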
+ * + * In particular (and also unlike a {@link CountDown}) this works even if you don't acquire any extra refs at all: in that case, the + * delegate action executes at the end of the try-with-resources block. + */ +public final class RefCountingRunnable implements Releasable { + + private static final Logger logger = LogManager.getLogger(RefCountingRunnable.class); + static final String ALREADY_CLOSED_MESSAGE = "already closed, cannot acquire or release any further refs"; + + private final RefCounted refCounted; + private final AtomicBoolean originalRefReleased = new AtomicBoolean(); + + private class AcquiredRef implements Releasable { + private final AtomicBoolean released = new AtomicBoolean(); + + @Override + public void close() { + releaseRef(released); + } + + @Override + public String toString() { + return RefCountingRunnable.this.toString(); + } + } + + /** + * Construct a {@link RefCountingRunnable} which executes {@code delegate} when all refs are released. + * @param delegate The action to execute when all refs are released. This action must not throw any exception. + */ + public RefCountingRunnable(Runnable delegate) { + this.refCounted = AbstractRefCounted.of(delegate); + } + + /** + * Acquire a reference to this object and return an action which releases it. The delegate {@link Runnable} is called when all its + * references have been released. + * + * It is invalid to call this method once all references are released. Doing so will trip an assertion if assertions are enabled, and + * will throw an {@link IllegalStateException} otherwise. + * + * It is also invalid to release the acquired resource more than once. Doing so will trip an assertion if assertions are enabled, but + * will be ignored otherwise. This deviates from the contract of {@link java.io.Closeable}. + */ + public Releasable acquire() { + if (refCounted.tryIncRef()) { + return new AcquiredRef(); + } + assert false : ALREADY_CLOSED_MESSAGE; + throw new IllegalStateException(ALREADY_CLOSED_MESSAGE); + } + + /** + * Acquire a reference to this object and return a listener which releases it when notified. The delegate {@link Runnable} is called + * when all its references have been released. + */ + public ActionListener acquireListener() { + return ActionListener.releasing(acquire()); + } + + /** + * Release the original reference to this object, which executes the delegate {@link Runnable} if there are no other references. + * + * It is invalid to call this method more than once. Doing so will trip an assertion if assertions are enabled, but will be ignored + * otherwise. This deviates from the contract of {@link java.io.Closeable}. 
+ */ + @Override + public void close() { + releaseRef(originalRefReleased); + } + + private void releaseRef(AtomicBoolean released) { + if (released.compareAndSet(false, true)) { + try { + refCounted.decRef(); + } catch (Exception e) { + logger.error("exception in delegate", e); + assert false : e; + } + } else { + assert false : "already closed"; + } + } + + @Override + public String toString() { + return refCounted.toString(); + } + +} diff --git a/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java b/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java index f85a365a1500..6e575a7f7dd9 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java @@ -8,39 +8,37 @@ package org.elasticsearch.action.support; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.threadpool.ThreadPool; + +import java.util.concurrent.ExecutorService; /** - * An action listener that wraps another action listener and threading its execution. + * An action listener that wraps another action listener and dispatches its completion to an executor. */ public final class ThreadedActionListener extends ActionListener.Delegating { - private final Logger logger; - private final ThreadPool threadPool; - private final String executor; + private static final Logger logger = LogManager.getLogger(ThreadedActionListener.class); + + private final ExecutorService executor; private final boolean forceExecution; - public ThreadedActionListener( - Logger logger, - ThreadPool threadPool, - String executor, - ActionListener listener, - boolean forceExecution - ) { + public ThreadedActionListener(ExecutorService executor, ActionListener listener) { + this(executor, false, listener); + } + + public ThreadedActionListener(ExecutorService executor, boolean forceExecution, ActionListener listener) { super(listener); - this.logger = logger; - this.threadPool = threadPool; - this.executor = executor; this.forceExecution = forceExecution; + this.executor = executor; } @Override public void onResponse(final Response response) { - threadPool.executor(executor).execute(new ActionRunnable<>(delegate) { + executor.execute(new ActionRunnable<>(delegate) { @Override public boolean isForceExecution() { return forceExecution; @@ -60,7 +58,7 @@ public String toString() { @Override public void onFailure(final Exception e) { - threadPool.executor(executor).execute(new AbstractRunnable() { + executor.execute(new AbstractRunnable() { @Override public boolean isForceExecution() { return forceExecution; @@ -84,8 +82,8 @@ public void onRejection(Exception e2) { @Override public void onFailure(Exception e) { + logger.error(() -> "failed to execute failure callback on [" + ThreadedActionListener.this + "]", e); assert false : e; - logger.error(() -> "failed to execute failure callback on [" + delegate + "]", e); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/TransportAction.java b/server/src/main/java/org/elasticsearch/action/support/TransportAction.java index de3911752964..4d3b9b0c15ff 100644 --- a/server/src/main/java/org/elasticsearch/action/support/TransportAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/support/TransportAction.java @@ -111,11 +111,7 @@ private TaskResultStoringActionListener(TaskManager taskManager, Task task, Acti @Override public void onResponse(Response response) { - try { - taskManager.storeResult(task, response, delegate); - } catch (Exception e) { - delegate.onFailure(e); - } + ActionListener.run(delegate, l -> taskManager.storeResult(task, response, l)); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/ChunkedBroadcastResponse.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/ChunkedBroadcastResponse.java new file mode 100644 index 000000000000..6564e30799ce --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/ChunkedBroadcastResponse.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.action.support.broadcast; + +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; +import org.elasticsearch.rest.action.RestActions; +import org.elasticsearch.xcontent.ToXContent; + +import java.io.IOException; +import java.util.Iterator; +import java.util.List; + +public abstract class ChunkedBroadcastResponse extends BaseBroadcastResponse implements ChunkedToXContentObject { + public ChunkedBroadcastResponse(StreamInput in) throws IOException { + super(in); + } + + public ChunkedBroadcastResponse( + int totalShards, + int successfulShards, + int failedShards, + List shardFailures + ) { + super(totalShards, successfulShards, failedShards, shardFailures); + } + + @Override + public final Iterator toXContentChunked(ToXContent.Params params) { + return Iterators.concat(Iterators.single((b, p) -> { + b.startObject(); + RestActions.buildBroadcastShardsHeader(b, p, this); + return b; + }), customXContentChunks(params), Iterators.single((builder, p) -> builder.endObject())); + } + + protected abstract Iterator customXContentChunks(ToXContent.Params params); +} diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java index 7e0b636d0056..6132b61a304c 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java @@ -137,11 +137,7 @@ protected AsyncBroadcastAction(Task task, Request request, ActionListener(0), clusterState)); - } catch (Exception e) { - listener.onFailure(e); - } + ActionListener.completeWith(listener, () -> newResponse(request, new AtomicReferenceArray(0), clusterState)); return; } // count the local operations, and perform the non local ones @@ -247,11 +243,7 @@ protected AtomicReferenceArray shardsResponses() { } protected void finishHim() { - try { - listener.onResponse(newResponse(request, shardsResponses, clusterState)); - } catch (Exception e) { - listener.onFailure(e); - } + 
ActionListener.completeWith(listener, () -> newResponse(request, shardsResponses, clusterState)); } void setFailure(ShardIterator shardIt, int shardIndex, Exception e) { diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index 18e8b8b0f07c..f057d3e671a4 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -8,14 +8,21 @@ package org.elasticsearch.action.support.broadcast.node; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.NodeResponseTracker; +import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; import org.elasticsearch.action.support.broadcast.BroadcastRequest; @@ -23,7 +30,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardsIterator; @@ -31,18 +37,16 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.common.util.concurrent.ListenableFuture; +import org.elasticsearch.common.util.concurrent.RunOnce; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -51,7 +55,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Consumer; import static org.elasticsearch.core.Strings.format; @@ -71,6 +74,8 @@ public abstract class TransportBroadcastByNodeAction< Response extends 
BaseBroadcastResponse, ShardOperationResult extends Writeable> extends HandledTransportAction { + private static final Logger logger = LogManager.getLogger(TransportBroadcastByNodeAction.class); + private final ClusterService clusterService; private final TransportService transportService; private final IndexNameExpressionResolver indexNameExpressionResolver; @@ -117,48 +122,6 @@ public TransportBroadcastByNodeAction( ); } - private Response newResponse( - Request request, - NodeResponseTracker nodeResponseTracker, - int unavailableShardCount, - Map> nodes, - ClusterState clusterState - ) throws NodeResponseTracker.DiscardedResponsesException { - int totalShards = 0; - int successfulShards = 0; - List broadcastByNodeResponses = new ArrayList<>(); - List exceptions = new ArrayList<>(); - for (int i = 0; i < nodeResponseTracker.getExpectedResponseCount(); i++) { - Object response = nodeResponseTracker.getResponse(i); - if (response instanceof FailedNodeException exception) { - totalShards += nodes.get(exception.nodeId()).size(); - for (ShardRouting shard : nodes.get(exception.nodeId())) { - exceptions.add(new DefaultShardOperationFailedException(shard.getIndexName(), shard.getId(), exception)); - } - } else { - @SuppressWarnings("unchecked") - NodeResponse nodeResponse = (NodeResponse) response; - broadcastByNodeResponses.addAll(nodeResponse.results); - totalShards += nodeResponse.getTotalShards(); - successfulShards += nodeResponse.getSuccessfulShards(); - for (BroadcastShardOperationFailedException throwable : nodeResponse.getExceptions()) { - if (TransportActions.isShardNotAvailableException(throwable) == false) { - exceptions.add( - new DefaultShardOperationFailedException( - throwable.getShardId().getIndexName(), - throwable.getShardId().getId(), - throwable - ) - ); - } - } - } - } - totalShards += unavailableShardCount; - int failedShards = exceptions.size(); - return newResponse(request, totalShards, successfulShards, failedShards, broadcastByNodeResponses, exceptions, clusterState); - } - /** * Deserialize a shard-level result from an input stream * @@ -167,27 +130,31 @@ private Response newResponse( */ protected abstract ShardOperationResult readShardResult(StreamInput in) throws IOException; + public interface ResponseFactory { + /** + * Creates a new response to the underlying request. + * + * @param totalShards the total number of shards considered for execution of the operation + * @param successfulShards the total number of shards for which execution of the operation was successful + * @param failedShards the total number of shards for which execution of the operation failed + * @param results the per-node aggregated shard-level results + * @param shardFailures the exceptions corresponding to shard operation failures + * @return the response + */ + Response newResponse( + int totalShards, + int successfulShards, + int failedShards, + List results, + List shardFailures + ); + } + /** - * Creates a new response to the underlying request. 
- * - * @param request the underlying request - * @param totalShards the total number of shards considered for execution of the operation - * @param successfulShards the total number of shards for which execution of the operation was successful - * @param failedShards the total number of shards for which execution of the operation failed - * @param results the per-node aggregated shard-level results - * @param shardFailures the exceptions corresponding to shard operation failures - * @param clusterState the cluster state - * @return the response + * Create a response factory based on the requst and the cluster state captured at the time the request was handled. Implementations + * must avoid capturing the full cluster state if possible. */ - protected abstract Response newResponse( - Request request, - int totalShards, - int successfulShards, - int failedShards, - List results, - List shardFailures, - ClusterState clusterState - ); + protected abstract ResponseFactory getResponseFactory(Request request, ClusterState clusterState); /** * Deserialize a request from an input stream @@ -255,341 +222,312 @@ protected String[] resolveConcreteIndexNames(ClusterState clusterState, Request @Override protected void doExecute(Task task, Request request, ActionListener listener) { - new AsyncAction(task, request, listener).start(); - } - - protected class AsyncAction implements CancellableTask.CancellationListener { - private final Task task; - private final Request request; - private final ActionListener listener; - private final ClusterState clusterState; - private final DiscoveryNodes nodes; - private final Map> nodeIds; - private final int unavailableShardCount; - private final NodeResponseTracker nodeResponseTracker; - - protected AsyncAction(Task task, Request request, ActionListener listener) { - this.task = task; - this.request = request; - this.listener = listener; - - clusterState = clusterService.state(); - nodes = clusterState.nodes(); - - ClusterBlockException globalBlockException = checkGlobalBlock(clusterState, request); - if (globalBlockException != null) { - throw globalBlockException; - } + final var clusterState = clusterService.state(); - String[] concreteIndices = resolveConcreteIndexNames(clusterState, request); - ClusterBlockException requestBlockException = checkRequestBlock(clusterState, request, concreteIndices); - if (requestBlockException != null) { - throw requestBlockException; - } + final var globalBlockException = checkGlobalBlock(clusterState, request); + if (globalBlockException != null) { + throw globalBlockException; + } - if (logger.isTraceEnabled()) { - logger.trace("resolving shards for [{}] based on cluster state version [{}]", actionName, clusterState.version()); - } - ShardsIterator shardIt = shards(clusterState, request, concreteIndices); - nodeIds = new HashMap<>(); - - int unavailableShardCount = 0; - for (ShardRouting shard : shardIt) { - // send a request to the shard only if it is assigned to a node that is in the local node's cluster state - // a scenario in which a shard can be assigned but to a node that is not in the local node's cluster state - // is when the shard is assigned to the master node, the local node has detected the master as failed - // and a new master has not yet been elected; in this situation the local node will have removed the - // master node from the local cluster state, but the shards assigned to the master will still be in the - // routing table as such - if (shard.assignedToNode() && nodes.get(shard.currentNodeId()) != null) 
{ - String nodeId = shard.currentNodeId(); - if (nodeIds.containsKey(nodeId) == false) { - nodeIds.put(nodeId, new ArrayList<>()); - } - nodeIds.get(nodeId).add(shard); - } else { - unavailableShardCount++; - } + final var concreteIndices = resolveConcreteIndexNames(clusterState, request); + final var requestBlockException = checkRequestBlock(clusterState, request, concreteIndices); + if (requestBlockException != null) { + throw requestBlockException; + } + logger.trace(() -> format("resolving shards for [%s] based on cluster state version [%s]", actionName, clusterState.version())); + final ShardsIterator shardIt = shards(clusterState, request, concreteIndices); + final Map> shardsByNodeId = new HashMap<>(); + + final var nodes = clusterState.nodes(); + int unavailableShardCount = 0; + int availableShardCount = 0; + for (final var shard : shardIt) { + // send a request to the shard only if it is assigned to a node that is in the local node's cluster state + // a scenario in which a shard can be assigned but to a node that is not in the local node's cluster state + // is when the shard is assigned to the master node, the local node has detected the master as failed + // and a new master has not yet been elected; in this situation the local node will have removed the + // master node from the local cluster state, but the shards assigned to the master will still be in the + // routing table as such + final var nodeId = shard.currentNodeId(); + if (nodeId != null && nodes.get(nodeId) != null) { + shardsByNodeId.computeIfAbsent(nodeId, n -> new ArrayList<>()).add(shard); + availableShardCount += 1; + } else { + unavailableShardCount++; } - this.unavailableShardCount = unavailableShardCount; - nodeResponseTracker = new NodeResponseTracker(nodeIds.size()); } - public void start() { + executeAsCoordinatingNode( + task, + request, + shardsByNodeId, + unavailableShardCount, + availableShardCount, + nodes, + getResponseFactory(request, clusterState), + listener + ); + } + + private void executeAsCoordinatingNode( + Task task, + Request request, + Map> shardsByNodeId, + int unavailableShardCount, + int availableShardCount, + DiscoveryNodes nodes, + ResponseFactory responseFactory, + ActionListener listener + ) { + final var mutex = new Object(); + final var shardResponses = new ArrayList(availableShardCount); + final var exceptions = new ArrayList(0); + final var totalShards = new AtomicInteger(unavailableShardCount); + final var successfulShards = new AtomicInteger(0); + + final var resultListener = new ListenableFuture(); + final var resultListenerCompleter = new RunOnce(() -> { if (task instanceof CancellableTask cancellableTask) { - cancellableTask.addListener(this); - } - if (nodeIds.size() == 0) { - try { - onCompletion(); - } catch (Exception e) { - listener.onFailure(e); - } - } else { - int nodeIndex = -1; - for (Map.Entry> entry : nodeIds.entrySet()) { - nodeIndex++; - DiscoveryNode node = nodes.get(entry.getKey()); - sendNodeRequest(node, entry.getValue(), nodeIndex); + if (cancellableTask.notifyIfCancelled(resultListener)) { + return; } } + // ref releases all happen-before here so no need to be synchronized + resultListener.onResponse( + responseFactory.newResponse(totalShards.get(), successfulShards.get(), exceptions.size(), shardResponses, exceptions) + ); + }); + + final var nodeFailureListeners = new ListenableFuture(); + if (task instanceof CancellableTask cancellableTask) { + cancellableTask.addListener(() -> { + assert cancellableTask.isCancelled(); + 
resultListenerCompleter.run(); + cancellableTask.notifyIfCancelled(nodeFailureListeners); + }); } - private void sendNodeRequest(final DiscoveryNode node, List shards, final int nodeIndex) { - try { - final NodeRequest nodeRequest = new NodeRequest(node.getId(), request, shards); - if (task != null) { - nodeRequest.setParentTask(clusterService.localNode().getId(), task.getId()); - } + final var transportRequestOptions = TransportRequestOptions.timeout(request.timeout()); - final TransportRequestOptions transportRequestOptions = TransportRequestOptions.timeout(request.timeout()); + try (var refs = new RefCountingRunnable(() -> { + resultListener.addListener(listener); + resultListenerCompleter.run(); + })) { + for (final var entry : shardsByNodeId.entrySet()) { + final var node = nodes.get(entry.getKey()); + final var shards = entry.getValue(); - transportService.sendRequest( - node, - transportNodeBroadcastAction, - nodeRequest, - transportRequestOptions, - new TransportResponseHandler() { - @Override - public NodeResponse read(StreamInput in) throws IOException { - return new NodeResponse(in); + final ActionListener nodeResponseListener = ActionListener.notifyOnce(new ActionListener() { + @Override + public void onResponse(NodeResponse nodeResponse) { + synchronized (mutex) { + shardResponses.addAll(nodeResponse.getResults()); } - - @Override - public void handleResponse(NodeResponse response) { - onNodeResponse(node, nodeIndex, response); + totalShards.addAndGet(nodeResponse.getTotalShards()); + successfulShards.addAndGet(nodeResponse.getSuccessfulShards()); + + for (BroadcastShardOperationFailedException exception : nodeResponse.getExceptions()) { + if (TransportActions.isShardNotAvailableException(exception)) { + assert node.getVersion().before(Version.V_8_7_0) : node; // we stopped sending these ignored exceptions + } else { + synchronized (mutex) { + exceptions.add( + new DefaultShardOperationFailedException( + exception.getShardId().getIndexName(), + exception.getShardId().getId(), + exception + ) + ); + } + } } + } - @Override - public void handleException(TransportException exp) { - onNodeFailure(node, nodeIndex, exp); + @Override + public void onFailure(Exception e) { + if (task instanceof CancellableTask cancellableTask && cancellableTask.isCancelled()) { + return; } - } - ); - } catch (Exception e) { - onNodeFailure(node, nodeIndex, e); - } - } - protected void onNodeResponse(DiscoveryNode node, int nodeIndex, NodeResponse response) { - if (logger.isTraceEnabled()) { - logger.trace("received response for [{}] from node [{}]", actionName, node.getId()); - } + logger.debug(() -> format("failed to execute [%s] on node [%s]", actionName, node), e); - if (nodeResponseTracker.trackResponseAndCheckIfLast(nodeIndex, response)) { - onCompletion(); - } - } + final var failedNodeException = new FailedNodeException(node.getId(), "Failed node [" + node.getId() + "]", e); + synchronized (mutex) { + for (ShardRouting shard : shards) { + exceptions.add( + new DefaultShardOperationFailedException(shard.getIndexName(), shard.getId(), failedNodeException) + ); + } + } - protected void onNodeFailure(DiscoveryNode node, int nodeIndex, Throwable t) { - String nodeId = node.getId(); - logger.debug(() -> format("failed to execute [%s] on node [%s]", actionName, nodeId), t); - if (nodeResponseTracker.trackResponseAndCheckIfLast( - nodeIndex, - new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t) - )) { - onCompletion(); - } - } + totalShards.addAndGet(shards.size()); + } - protected void 
onCompletion() { - if ((task instanceof CancellableTask t) && t.notifyIfCancelled(listener)) { - return; - } + @Override + public String toString() { + return "[" + actionName + "][" + node.descriptionWithoutAttributes() + "]"; + } + }); - Response response = null; - try { - response = newResponse(request, nodeResponseTracker, unavailableShardCount, nodeIds, clusterState); - } catch (NodeResponseTracker.DiscardedResponsesException e) { - // We propagate the reason that the results, in this case the task cancellation, in case the listener needs to take - // follow-up actions - listener.onFailure((Exception) e.getCause()); - } catch (Exception e) { - logger.debug("failed to combine responses from nodes", e); - listener.onFailure(e); - } - if (response != null) { - try { - listener.onResponse(response); - } catch (Exception e) { - listener.onFailure(e); + if (task instanceof CancellableTask) { + nodeFailureListeners.addListener(nodeResponseListener); } - } - } - @Override - public void onCancelled() { - assert task instanceof CancellableTask : "task must be cancellable"; - try { - ((CancellableTask) task).ensureNotCancelled(); - } catch (TaskCancelledException e) { - nodeResponseTracker.discardIntermediateResponses(e); - } - } + final var nodeRequest = new NodeRequest(request, shards, node.getId()); + if (task != null) { + nodeRequest.setParentTask(clusterService.localNode().getId(), task.getId()); + } - // For testing purposes - public NodeResponseTracker getNodeResponseTracker() { - return nodeResponseTracker; + transportService.sendRequest( + node, + transportNodeBroadcastAction, + nodeRequest, + transportRequestOptions, + new ActionListenerResponseHandler<>( + ActionListener.releaseAfter(nodeResponseListener, refs.acquire()), + NodeResponse::new + ) + ); + } } } class BroadcastByNodeTransportRequestHandler implements TransportRequestHandler { @Override public void messageReceived(final NodeRequest request, TransportChannel channel, Task task) throws Exception { - List shards = request.getShards(); - final int totalShards = shards.size(); - if (logger.isTraceEnabled()) { - logger.trace("[{}] executing operation on [{}] shards", actionName, totalShards); + executeAsDataNode( + task, + request.getIndicesLevelRequest(), + request.getShards(), + request.getNodeId(), + new ChannelActionListener<>(channel, transportNodeBroadcastAction, request) + ); + } + } + + private void executeAsDataNode( + Task task, + Request request, + List shards, + String nodeId, + ActionListener listener + ) { + logger.trace("[{}] executing operation on [{}] shards", actionName, shards.size()); + + final var results = new ArrayList(shards.size()); + final var exceptions = new ArrayList(0); + + final var resultListener = new ListenableFuture(); + final var resultListenerCompleter = new RunOnce(() -> { + if (task instanceof CancellableTask cancellableTask) { + if (cancellableTask.notifyIfCancelled(resultListener)) { + return; + } } - final AtomicArray shardResultOrExceptions = new AtomicArray<>(totalShards); + // ref releases all happen-before here so no need to be synchronized + resultListener.onResponse(new NodeResponse(nodeId, shards.size(), results, exceptions)); + }); + + final var shardFailureListeners = new ListenableFuture(); + if (task instanceof CancellableTask cancellableTask) { + cancellableTask.addListener(() -> { + assert cancellableTask.isCancelled(); + resultListenerCompleter.run(); + cancellableTask.notifyIfCancelled(shardFailureListeners); + }); + } - final AtomicInteger counter = new 
AtomicInteger(shards.size()); - int shardIndex = -1; - for (final ShardRouting shardRouting : shards) { - shardIndex++; - final int finalShardIndex = shardIndex; - onShardOperation(request, shardRouting, task, ActionListener.notifyOnce(new ActionListener() { + try (var refs = new RefCountingRunnable(() -> { + resultListener.addListener(listener); + resultListenerCompleter.run(); + })) { + for (final var shardRouting : shards) { + if (task instanceof CancellableTask cancellableTask && cancellableTask.isCancelled()) { + return; + } + final ActionListener shardListener = ActionListener.notifyOnce(new ActionListener<>() { @Override public void onResponse(ShardOperationResult shardOperationResult) { - shardResultOrExceptions.setOnce(finalShardIndex, shardOperationResult); - if (counter.decrementAndGet() == 0) { - finishHim(request, channel, task, shardResultOrExceptions); + logger.trace(() -> format("[%s] completed operation for shard [%s]", actionName, shardRouting.shortSummary())); + synchronized (results) { + results.add(shardOperationResult); } } @Override public void onFailure(Exception e) { - shardResultOrExceptions.setOnce(finalShardIndex, e); - if (counter.decrementAndGet() == 0) { - finishHim(request, channel, task, shardResultOrExceptions); + if (task instanceof CancellableTask cancellableTask && cancellableTask.isCancelled()) { + return; } - } - })); - } - } - - @SuppressWarnings("unchecked") - private void finishHim(NodeRequest request, TransportChannel channel, Task task, AtomicArray shardResultOrExceptions) { - if (task instanceof CancellableTask) { - try { - ((CancellableTask) task).ensureNotCancelled(); - } catch (TaskCancelledException e) { - try { - channel.sendResponse(e); - } catch (IOException ioException) { - e.addSuppressed(ioException); - logger.warn("failed to send response", e); - } - return; - } - } - List accumulatedExceptions = new ArrayList<>(); - List results = new ArrayList<>(); - for (int i = 0; i < shardResultOrExceptions.length(); i++) { - if (shardResultOrExceptions.get(i) instanceof BroadcastShardOperationFailedException) { - accumulatedExceptions.add((BroadcastShardOperationFailedException) shardResultOrExceptions.get(i)); - } else { - results.add((ShardOperationResult) shardResultOrExceptions.get(i)); - } - } - - try { - channel.sendResponse( - new NodeResponse(request.getNodeId(), shardResultOrExceptions.length(), results, accumulatedExceptions) - ); - } catch (IOException e) { - logger.warn("failed to send response", e); - } - } - - private void onShardOperation( - final NodeRequest request, - final ShardRouting shardRouting, - final Task task, - final ActionListener listener - ) { - if (task instanceof CancellableTask && ((CancellableTask) task).notifyIfCancelled(listener)) { - return; - } - if (logger.isTraceEnabled()) { - logger.trace("[{}] executing operation for shard [{}]", actionName, shardRouting.shortSummary()); - } - final Consumer failureHandler = e -> { - BroadcastShardOperationFailedException failure = new BroadcastShardOperationFailedException( - shardRouting.shardId(), - "operation " + actionName + " failed", - e - ); - failure.setShard(shardRouting.shardId()); - if (TransportActions.isShardNotAvailableException(e)) { - if (logger.isTraceEnabled()) { - logger.trace( - () -> format("[%s] failed to execute operation for shard [%s]", actionName, shardRouting.shortSummary()), - e - ); - } - } else { - if (logger.isDebugEnabled()) { - logger.debug( + logger.log( + TransportActions.isShardNotAvailableException(e) ? 
Level.TRACE : Level.DEBUG, () -> format("[%s] failed to execute operation for shard [%s]", actionName, shardRouting.shortSummary()), e ); - } - } - listener.onFailure(failure); - }; - try { - shardOperation(request.indicesLevelRequest, shardRouting, task, new ActionListener<>() { - @Override - public void onResponse(ShardOperationResult shardOperationResult) { - if (logger.isTraceEnabled()) { - logger.trace("[{}] completed operation for shard [{}]", actionName, shardRouting.shortSummary()); + if (TransportActions.isShardNotAvailableException(e) == false) { + synchronized (exceptions) { + exceptions.add( + new BroadcastShardOperationFailedException( + shardRouting.shardId(), + "operation " + actionName + " failed", + e + ) + ); + } } - listener.onResponse(shardOperationResult); } @Override - public void onFailure(Exception e) { - failureHandler.accept(e); + public String toString() { + return "[" + actionName + "][" + shardRouting + "]"; } }); - } catch (Exception e) { - assert false : "shardOperation should not throw an exception, but delegate to listener instead"; - failureHandler.accept(e); + + if (task instanceof CancellableTask) { + shardFailureListeners.addListener(shardListener); + } + + logger.trace(() -> format("[%s] executing operation for shard [%s]", actionName, shardRouting.shortSummary())); + ActionRunnable.wrap( + ActionListener.releaseAfter(shardListener, refs.acquire()), + l -> shardOperation(request, shardRouting, task, l) + ).run(); } } } - public class NodeRequest extends TransportRequest implements IndicesRequest { - private String nodeId; - - private List shards; + class NodeRequest extends TransportRequest implements IndicesRequest { + private final Request indicesLevelRequest; + private final List shards; + private final String nodeId; - protected Request indicesLevelRequest; - - public NodeRequest(StreamInput in) throws IOException { + NodeRequest(StreamInput in) throws IOException { super(in); indicesLevelRequest = readRequestFrom(in); shards = in.readList(ShardRouting::new); nodeId = in.readString(); } - public NodeRequest(String nodeId, Request request, List shards) { - this.indicesLevelRequest = request; + NodeRequest(Request indicesLevelRequest, List shards, String nodeId) { + this.indicesLevelRequest = indicesLevelRequest; this.shards = shards; this.nodeId = nodeId; } - public List getShards() { + List getShards() { return shards; } - public String getNodeId() { + String getNodeId() { return nodeId; } + Request getIndicesLevelRequest() { + return indicesLevelRequest; + } + @Override public String[] indices() { return indicesLevelRequest.indices(); @@ -644,19 +582,23 @@ class NodeResponse extends TransportResponse { this.exceptions = exceptions; } - public String getNodeId() { + String getNodeId() { return nodeId; } - public int getTotalShards() { + int getTotalShards() { return totalShards; } - public int getSuccessfulShards() { + int getSuccessfulShards() { return results.size(); } - public List getExceptions() { + List getResults() { + return results; + } + + List getExceptions() { return exceptions; } @@ -681,13 +623,7 @@ public static final class EmptyResult implements Writeable { private EmptyResult() {} - private EmptyResult(StreamInput in) {} - @Override public void writeTo(StreamOutput out) {} - - public static EmptyResult readEmptyResultFrom(StreamInput in) { - return INSTANCE; - } } } diff --git a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequestBuilder.java 
b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequestBuilder.java index 013a922e2ef8..b26f6faa0881 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequestBuilder.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.action.support.master; +import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.core.TimeValue; @@ -16,7 +17,7 @@ */ public abstract class AcknowledgedRequestBuilder< Request extends AcknowledgedRequest, - Response extends AcknowledgedResponse, + Response extends ActionResponse & IsAcknowledgedSupplier, RequestBuilder extends AcknowledgedRequestBuilder> extends MasterNodeOperationRequestBuilder< Request, Response, diff --git a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java index b019782b27d7..e76d85f825ff 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java @@ -25,13 +25,14 @@ /** * A response that indicates that a request has been acknowledged */ -public class AcknowledgedResponse extends ActionResponse implements ToXContentObject { +public class AcknowledgedResponse extends ActionResponse implements IsAcknowledgedSupplier, ToXContentObject { public static final AcknowledgedResponse TRUE = new AcknowledgedResponse(true); public static final AcknowledgedResponse FALSE = new AcknowledgedResponse(false); - private static final ParseField ACKNOWLEDGED = new ParseField("acknowledged"); + public static final String ACKNOWLEDGED_KEY = "acknowledged"; + private static final ParseField ACKNOWLEDGED = new ParseField(ACKNOWLEDGED_KEY); protected static void declareAcknowledgedField(ConstructingObjectParser objectParser) { objectParser.declareField( @@ -65,6 +66,7 @@ protected AcknowledgedResponse(boolean acknowledged) { * Returns whether the response is acknowledged or not * @return true if the response is acknowledged, false otherwise */ + @Override public final boolean isAcknowledged() { return acknowledged; } @@ -77,7 +79,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(ACKNOWLEDGED.getPreferredName(), isAcknowledged()); + builder.field(ACKNOWLEDGED_KEY, isAcknowledged()); addCustomFields(builder, params); builder.endObject(); return builder; diff --git a/server/src/main/java/org/elasticsearch/action/support/master/IsAcknowledgedSupplier.java b/server/src/main/java/org/elasticsearch/action/support/master/IsAcknowledgedSupplier.java new file mode 100644 index 000000000000..ae5b1a93e235 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/support/master/IsAcknowledgedSupplier.java @@ -0,0 +1,13 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.support.master; + +public interface IsAcknowledgedSupplier { + boolean isAcknowledged(); +} diff --git a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java index 67268712c49c..d404212fc834 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java @@ -245,7 +245,7 @@ protected void doStart(ClusterState clusterState) { masterNode, actionName, request, - new ActionListenerResponseHandler(listener, responseReader) { + new ActionListenerResponseHandler<>(listener, responseReader, executor) { @Override public void handleException(final TransportException exp) { Throwable cause = exp.unwrapCause(); diff --git a/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java index d07cee07a0d0..8e80444cc125 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.support.master.info; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadRequest; @@ -31,7 +31,7 @@ public ClusterInfoRequest() {} public ClusterInfoRequest(StreamInput in) throws IOException { super(in); indices = in.readStringArray(); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { in.readStringArray(); } indicesOptions = IndicesOptions.readIndicesOptions(in); @@ -41,7 +41,7 @@ public ClusterInfoRequest(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArray(indices); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeStringArray(Strings.EMPTY_ARRAY); } indicesOptions.writeIndicesOptions(out); diff --git a/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesXContentResponse.java b/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesXContentResponse.java index 2922d33a85d4..4113788aafca 100644 --- a/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesXContentResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesXContentResponse.java @@ -33,7 +33,7 @@ protected BaseNodesXContentResponse(StreamInput in) throws IOException { } @Override - public final Iterator toXContentChunked() { + public final Iterator toXContentChunked(ToXContent.Params params) { return Iterators.concat(Iterators.single((b, p) -> { b.startObject(); RestActions.buildNodesHeader(b, p, this); diff --git a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java index 02337b389430..5f805efe0c17 100644 --- a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java @@ -8,26 +8,29 @@ package org.elasticsearch.action.support.nodes; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.action.support.NodeResponseTracker; +import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.concurrent.ListenableFuture; +import org.elasticsearch.common.util.concurrent.RunOnce; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -36,12 +39,16 @@ import java.util.List; import java.util.Objects; +import static org.elasticsearch.core.Strings.format; + public abstract class TransportNodesAction< NodesRequest extends BaseNodesRequest, NodesResponse extends BaseNodesResponse, NodeRequest extends TransportRequest, NodeResponse extends BaseNodeResponse> extends HandledTransportAction { + private static final Logger logger = LogManager.getLogger(TransportNodesAction.class); + protected final ThreadPool threadPool; protected final ClusterService clusterService; protected final TransportService transportService; @@ -81,7 +88,7 @@ protected TransportNodesAction( this.nodeResponseClass = Objects.requireNonNull(nodeResponseClass); this.transportNodeAction = actionName + "[n]"; - this.finalExecutor = finalExecutor; + this.finalExecutor = finalExecutor.equals(ThreadPool.Names.SAME) ? ThreadPool.Names.GENERIC : finalExecutor; transportService.registerRequestHandler(transportNodeAction, nodeExecutor, nodeRequest, new NodeTransportHandler()); } @@ -119,40 +126,89 @@ protected TransportNodesAction( @Override protected void doExecute(Task task, NodesRequest request, ActionListener listener) { - new AsyncAction(task, request, listener).start(); - } + if (request.concreteNodes() == null) { + resolveRequest(request, clusterService.state()); + assert request.concreteNodes() != null; + } - /** - * Map the responses into {@code nodeResponseClass} responses and {@link FailedNodeException}s, convert to a {@link NodesResponse} and - * pass it to the listener. Fails the listener with a {@link NullPointerException} if {@code nodesResponses} is null. - * - * @param request The associated request. 
- * @param nodeResponseTracker All node-level responses collected so far - * @throws NodeResponseTracker.DiscardedResponsesException if {@code nodeResponseTracker} has already discarded the intermediate results - * @see #newResponseAsync(Task, BaseNodesRequest, List, List, ActionListener) - */ - // exposed for tests - void newResponse(Task task, NodesRequest request, NodeResponseTracker nodeResponseTracker, ActionListener listener) - throws NodeResponseTracker.DiscardedResponsesException { + final var responses = new ArrayList(request.concreteNodes().length); + final var exceptions = new ArrayList(0); - if (nodeResponseTracker == null) { - listener.onFailure(new NullPointerException("nodesResponses")); - return; + final var resultListener = new ListenableFuture(); + final var resultListenerCompleter = new RunOnce(() -> { + if (task instanceof CancellableTask cancellableTask) { + if (cancellableTask.notifyIfCancelled(resultListener)) { + return; + } + } + // ref releases all happen-before here so no need to be synchronized + threadPool.executor(finalExecutor) + .execute(ActionRunnable.wrap(resultListener, l -> newResponseAsync(task, request, responses, exceptions, l))); + }); + + final var nodeCancellationListener = new ListenableFuture(); // collects node listeners & completes them if cancelled + if (task instanceof CancellableTask cancellableTask) { + cancellableTask.addListener(() -> { + assert cancellableTask.isCancelled(); + resultListenerCompleter.run(); + cancellableTask.notifyIfCancelled(nodeCancellationListener); + }); } - final List responses = new ArrayList<>(); - final List failures = new ArrayList<>(); + final var transportRequestOptions = TransportRequestOptions.timeout(request.timeout()); + + try (var refs = new RefCountingRunnable(() -> { + resultListener.addListener(listener); + resultListenerCompleter.run(); + })) { + for (final var node : request.concreteNodes()) { + final ActionListener nodeResponseListener = ActionListener.notifyOnce(new ActionListener<>() { + @Override + public void onResponse(NodeResponse nodeResponse) { + synchronized (responses) { + responses.add(nodeResponse); + } + } + + @Override + public void onFailure(Exception e) { + if (task instanceof CancellableTask cancellableTask && cancellableTask.isCancelled()) { + return; + } + + logger.debug(() -> format("failed to execute [%s] on node [%s]", actionName, node), e); + synchronized (exceptions) { + exceptions.add(new FailedNodeException(node.getId(), "Failed node [" + node.getId() + "]", e)); + } + } + + @Override + public String toString() { + return "[" + actionName + "][" + node.descriptionWithoutAttributes() + "]"; + } + }); + + if (task instanceof CancellableTask) { + nodeCancellationListener.addListener(nodeResponseListener); + } - for (int i = 0; i < nodeResponseTracker.getExpectedResponseCount(); ++i) { - Object response = nodeResponseTracker.getResponse(i); - if (nodeResponseTracker.getResponse(i)instanceof FailedNodeException failedNodeException) { - failures.add(failedNodeException); - } else { - responses.add(nodeResponseClass.cast(response)); + final var nodeRequest = newNodeRequest(request); + if (task != null) { + nodeRequest.setParentTask(clusterService.localNode().getId(), task.getId()); + } + + transportService.sendRequest( + node, + transportNodeAction, + nodeRequest, + transportRequestOptions, + new ActionListenerResponseHandler<>( + ActionListener.releaseAfter(nodeResponseListener, refs.acquire()), + in -> newNodeResponse(in, node) + ) + ); } } - - newResponseAsync(task, request, 
responses, failures, listener); } /** @@ -195,141 +251,9 @@ protected void resolveRequest(NodesRequest request, ClusterState clusterState) { request.setConcreteNodes(Arrays.stream(nodesIds).map(clusterState.nodes()::get).toArray(DiscoveryNode[]::new)); } - /** - * Get a backwards compatible transport action name - */ - protected String getTransportNodeAction(DiscoveryNode node) { - return transportNodeAction; - } - - class AsyncAction implements CancellableTask.CancellationListener { - - private final NodesRequest request; - private final ActionListener listener; - private final NodeResponseTracker nodeResponseTracker; - private final Task task; - - AsyncAction(Task task, NodesRequest request, ActionListener listener) { - this.task = task; - this.request = request; - this.listener = listener; - if (request.concreteNodes() == null) { - resolveRequest(request, clusterService.state()); - assert request.concreteNodes() != null; - } - this.nodeResponseTracker = new NodeResponseTracker(request.concreteNodes().length); - } - - void start() { - if (task instanceof CancellableTask cancellableTask) { - cancellableTask.addListener(this); - } - final DiscoveryNode[] nodes = request.concreteNodes(); - if (nodes.length == 0) { - finishHim(); - return; - } - final TransportRequestOptions transportRequestOptions = TransportRequestOptions.timeout(request.timeout()); - for (int i = 0; i < nodes.length; i++) { - final int idx = i; - final DiscoveryNode node = nodes[i]; - final String nodeId = node.getId(); - try { - TransportRequest nodeRequest = newNodeRequest(request); - if (task != null) { - nodeRequest.setParentTask(clusterService.localNode().getId(), task.getId()); - } - - transportService.sendRequest( - node, - getTransportNodeAction(node), - nodeRequest, - transportRequestOptions, - new TransportResponseHandler() { - @Override - public NodeResponse read(StreamInput in) throws IOException { - return newNodeResponse(in, node); - } - - @Override - public void handleResponse(NodeResponse response) { - onOperation(idx, response); - } - - @Override - public void handleException(TransportException exp) { - onFailure(idx, node.getId(), exp); - } - - @Override - public String toString() { - return "AsyncActionNodeResponseHandler{node=" + node + ", action=" + AsyncAction.this + '}'; - } - } - ); - } catch (Exception e) { - onFailure(idx, nodeId, e); - } - } - } - - // For testing purposes - NodeResponseTracker getNodeResponseTracker() { - return nodeResponseTracker; - } - - private void onOperation(int idx, NodeResponse nodeResponse) { - if (nodeResponseTracker.trackResponseAndCheckIfLast(idx, nodeResponse)) { - finishHim(); - } - } - - private void onFailure(int idx, String nodeId, Throwable t) { - logger.debug(() -> "failed to execute on node [" + nodeId + "]", t); - if (nodeResponseTracker.trackResponseAndCheckIfLast(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t))) { - finishHim(); - } - } - - private void finishHim() { - if ((task instanceof CancellableTask t) && t.notifyIfCancelled(listener)) { - return; - } - - final String executor = finalExecutor.equals(ThreadPool.Names.SAME) ? 
ThreadPool.Names.GENERIC : finalExecutor; - threadPool.executor(executor).execute(() -> { - try { - newResponse(task, request, nodeResponseTracker, listener); - } catch (NodeResponseTracker.DiscardedResponsesException e) { - // We propagate the reason that the results, in this case the task cancellation, in case the listener needs to take - // follow-up actions - listener.onFailure((Exception) e.getCause()); - } - }); - } - - @Override - public void onCancelled() { - assert task instanceof CancellableTask : "task must be cancellable"; - try { - ((CancellableTask) task).ensureNotCancelled(); - } catch (TaskCancelledException e) { - nodeResponseTracker.discardIntermediateResponses(e); - } - } - - @Override - public String toString() { - return "AsyncAction{request=" + request + ", listener=" + listener + '}'; - } - } - class NodeTransportHandler implements TransportRequestHandler { @Override public void messageReceived(NodeRequest request, TransportChannel channel, Task task) throws Exception { - if (task instanceof CancellableTask) { - ((CancellableTask) task).ensureNotCancelled(); - } channel.sendResponse(nodeOperation(request, task)); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 8ec274bc410f..6b1916b4ec84 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -213,6 +213,7 @@ private void performOnReplica( final long maxSeqNoOfUpdatesOrDeletes, final PendingReplicationActions pendingReplicationActions ) { + assert shard.isPromotableToPrimary() : "only promotable shards should receive replication requests"; if (logger.isTraceEnabled()) { logger.trace("[{}] sending op [{}] to replica {} for request [{}]", shard.shardId(), opType, shard, replicaRequest); } diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java index 62a2d3d38e06..94c90a0efcd8 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java @@ -8,12 +8,13 @@ package org.elasticsearch.action.support.replication; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; import org.elasticsearch.action.support.broadcast.BroadcastRequest; @@ -25,15 +26,16 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.index.shard.ShardId; 
import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.Transports; import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.concurrent.CopyOnWriteArrayList; +import java.util.Map; /** * Base class for requests that should be executed on all shards of an index or several indices. @@ -49,6 +51,7 @@ public abstract class TransportBroadcastReplicationAction< private final ClusterService clusterService; private final IndexNameExpressionResolver indexNameExpressionResolver; private final NodeClient client; + private final String executor; public TransportBroadcastReplicationAction( String name, @@ -58,66 +61,112 @@ public TransportBroadcastReplicationAction( NodeClient client, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - ActionType replicatedBroadcastShardAction + ActionType replicatedBroadcastShardAction, + String executor ) { super(name, transportService, actionFilters, requestReader); this.client = client; this.replicatedBroadcastShardAction = replicatedBroadcastShardAction; this.clusterService = clusterService; this.indexNameExpressionResolver = indexNameExpressionResolver; + this.executor = executor; } @Override protected void doExecute(Task task, Request request, ActionListener listener) { - final ClusterState clusterState = clusterService.state(); - List shards = shards(request, clusterState); - final CopyOnWriteArrayList shardsResponses = new CopyOnWriteArrayList<>(); - if (shards.size() == 0) { - finishAndNotifyListener(listener, shardsResponses); - } - final CountDown responsesCountDown = new CountDown(shards.size()); - for (final ShardId shardId : shards) { - ActionListener shardActionListener = new ActionListener() { + clusterService.threadPool().executor(executor).execute(ActionRunnable.wrap(listener, createAsyncAction(task, request))); + } + + private CheckedConsumer, Exception> createAsyncAction(Task task, Request request) { + return new CheckedConsumer, Exception>() { + + private int totalShardCopyCount; + private int successShardCopyCount; + private final List allFailures = new ArrayList<>(); + + @Override + public void accept(ActionListener listener) { + assert totalShardCopyCount == 0 && successShardCopyCount == 0 && allFailures.isEmpty() : "shouldn't call this twice"; + + final ClusterState clusterState = clusterService.state(); + final List shards = shards(request, clusterState); + final Map indexMetadataByName = clusterState.getMetadata().indices(); + + try (var refs = new RefCountingRunnable(() -> finish(listener))) { + for (final ShardId shardId : shards) { + // NB This sends O(#shards) requests in a tight loop; TODO add some throttling here? 
+ shardExecute( + task, + request, + shardId, + ActionListener.releaseAfter(new ReplicationResponseActionListener(shardId, indexMetadataByName), refs.acquire()) + ); + } + } + } + + private synchronized void addShardResponse(int numCopies, int successful, List failures) { + totalShardCopyCount += numCopies; + successShardCopyCount += successful; + allFailures.addAll(failures); + } + + void finish(ActionListener listener) { + // no need for synchronized here, the RefCountingRunnable guarantees that all the addShardResponse calls happen-before here + logger.trace("{}: got all shard responses", actionName); + listener.onResponse(newResponse(successShardCopyCount, allFailures.size(), totalShardCopyCount, allFailures)); + } + + class ReplicationResponseActionListener implements ActionListener { + private final ShardId shardId; + private final Map indexMetadataByName; + + ReplicationResponseActionListener(ShardId shardId, Map indexMetadataByName) { + this.shardId = shardId; + this.indexMetadataByName = indexMetadataByName; + } + @Override public void onResponse(ShardResponse shardResponse) { - shardsResponses.add(shardResponse); + assert shardResponse != null; logger.trace("{}: got response from {}", actionName, shardId); - if (responsesCountDown.countDown()) { - finishAndNotifyListener(listener, shardsResponses); - } + addShardResponse( + shardResponse.getShardInfo().getTotal(), + shardResponse.getShardInfo().getSuccessful(), + Arrays.stream(shardResponse.getShardInfo().getFailures()) + .map( + f -> new DefaultShardOperationFailedException( + new BroadcastShardOperationFailedException(shardId, f.getCause()) + ) + ) + .toList() + ); } @Override public void onFailure(Exception e) { logger.trace("{}: got failure from {}", actionName, shardId); - int totalNumCopies = clusterState.getMetadata().getIndexSafe(shardId.getIndex()).getNumberOfReplicas() + 1; - ShardResponse shardResponse = newShardResponse(); - ReplicationResponse.ShardInfo.Failure[] failures; + final int numCopies = indexMetadataByName.get(shardId.getIndexName()).getNumberOfReplicas() + 1; + final List result; if (TransportActions.isShardNotAvailableException(e)) { - failures = new ReplicationResponse.ShardInfo.Failure[0]; + result = List.of(); } else { - ReplicationResponse.ShardInfo.Failure failure = new ReplicationResponse.ShardInfo.Failure( - shardId, - null, - e, - ExceptionsHelper.status(e), - true + final var failures = new DefaultShardOperationFailedException[numCopies]; + Arrays.fill( + failures, + new DefaultShardOperationFailedException(new BroadcastShardOperationFailedException(shardId, e)) ); - failures = new ReplicationResponse.ShardInfo.Failure[totalNumCopies]; - Arrays.fill(failures, failure); - } - shardResponse.setShardInfo(new ReplicationResponse.ShardInfo(totalNumCopies, 0, failures)); - shardsResponses.add(shardResponse); - if (responsesCountDown.countDown()) { - finishAndNotifyListener(listener, shardsResponses); + result = Arrays.asList(failures); } + addShardResponse(numCopies, 0, result); } - }; - shardExecute(task, request, shardId, shardActionListener); - } + } + + }; } protected void shardExecute(Task task, Request request, ShardId shardId, ActionListener shardActionListener) { + assert Transports.assertNotTransportThread("may hit all the shards"); ShardRequest shardRequest = newShardRequest(request, shardId); shardRequest.setParentTask(clusterService.localNode().getId(), task.getId()); client.executeLocally(replicatedBroadcastShardAction, shardRequest, shardActionListener); @@ -127,6 +176,7 @@ protected void 
shardExecute(Task task, Request request, ShardId shardId, ActionL * @return all shard ids the request should run on */ protected List shards(Request request, ClusterState clusterState) { + assert Transports.assertNotTransportThread("may hit all the shards"); List shardIds = new ArrayList<>(); String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request); for (String index : concreteIndices) { @@ -141,43 +191,13 @@ protected List shards(Request request, ClusterState clusterState) { return shardIds; } - protected abstract ShardResponse newShardResponse(); - protected abstract ShardRequest newShardRequest(Request request, ShardId shardId); - private void finishAndNotifyListener(ActionListener listener, CopyOnWriteArrayList shardsResponses) { - logger.trace("{}: got all shard responses", actionName); - int successfulShards = 0; - int failedShards = 0; - int totalNumCopies = 0; - List shardFailures = null; - for (int i = 0; i < shardsResponses.size(); i++) { - ReplicationResponse shardResponse = shardsResponses.get(i); - if (shardResponse == null) { - // non active shard, ignore - } else { - failedShards += shardResponse.getShardInfo().getFailed(); - successfulShards += shardResponse.getShardInfo().getSuccessful(); - totalNumCopies += shardResponse.getShardInfo().getTotal(); - if (shardFailures == null) { - shardFailures = new ArrayList<>(); - } - for (ReplicationResponse.ShardInfo.Failure failure : shardResponse.getShardInfo().getFailures()) { - shardFailures.add( - new DefaultShardOperationFailedException( - new BroadcastShardOperationFailedException(failure.fullShardId(), failure.getCause()) - ) - ); - } - } - } - listener.onResponse(newResponse(successfulShards, failedShards, totalNumCopies, shardFailures)); - } - protected abstract Response newResponse( int successfulShards, int failedShards, int totalNumCopies, List shardFailures ); + } diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 7e299f5baa25..1414aa5bbdb7 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -497,7 +497,7 @@ public void handleException(TransportException exp) { new ReplicationOperation<>( primaryRequest.getRequest(), primaryShardReference, - responseListener.map(result -> result.finalResponseIfSuccessful), + responseListener.map(result -> result.replicationResponse), newReplicasProxy(), logger, threadPool, @@ -533,29 +533,18 @@ protected void adaptResponse(Response response, IndexShard indexShard) { public static class PrimaryResult, Response extends ReplicationResponse> implements ReplicationOperation.PrimaryResult { - protected final ReplicaRequest replicaRequest; - public final Response finalResponseIfSuccessful; - public final Exception finalFailure; + private final ReplicaRequest replicaRequest; + public final Response replicationResponse; /** * Result of executing a primary operation - * expects finalResponseIfSuccessful or finalFailure to be not-null + * expects replicationResponse to be not-null */ - public PrimaryResult(ReplicaRequest replicaRequest, Response finalResponseIfSuccessful, Exception finalFailure) { - assert finalFailure != null ^ finalResponseIfSuccessful != null - : "either a response or a failure has to be not null, " - + "found [" 
- + finalFailure - + "] failure and [" - + finalResponseIfSuccessful - + "] response"; - this.replicaRequest = replicaRequest; - this.finalResponseIfSuccessful = finalResponseIfSuccessful; - this.finalFailure = finalFailure; - } - public PrimaryResult(ReplicaRequest replicaRequest, Response replicationResponse) { - this(replicaRequest, replicationResponse, null); + assert replicaRequest != null : "request is required"; + assert replicationResponse != null : "response is required"; + this.replicaRequest = replicaRequest; + this.replicationResponse = replicationResponse; } @Override @@ -565,18 +554,12 @@ public ReplicaRequest replicaRequest() { @Override public void setShardInfo(ReplicationResponse.ShardInfo shardInfo) { - if (finalResponseIfSuccessful != null) { - finalResponseIfSuccessful.setShardInfo(shardInfo); - } + replicationResponse.setShardInfo(shardInfo); } @Override public void runPostReplicationActions(ActionListener listener) { - if (finalFailure != null) { - listener.onFailure(finalFailure); - } else { - listener.onResponse(null); - } + listener.onResponse(null); } } @@ -1140,8 +1123,8 @@ public void failShard(String reason, Exception e) { public void perform(Request request, ActionListener> listener) { if (Assertions.ENABLED) { listener = listener.map(result -> { - assert result.replicaRequest() == null || result.finalFailure == null - : "a replica request [" + result.replicaRequest() + "] with a primary failure [" + result.finalFailure + "]"; + assert result.replicaRequest().getParentTask().equals(request.getParentTask()) + : "a replica request [" + result.replicaRequest() + "] with a different parent task from the primary request"; return result; }); } diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index ffcd530ae418..fb81739d14f9 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -270,58 +270,45 @@ public WritePrimaryResult( ReplicaRequest request, @Nullable Response finalResponse, @Nullable Location location, - @Nullable Exception operationFailure, IndexShard primary, Logger logger ) { - this(request, finalResponse, location, operationFailure, primary, logger, null); + this(request, finalResponse, location, primary, logger, null); } public WritePrimaryResult( ReplicaRequest request, @Nullable Response finalResponse, @Nullable Location location, - @Nullable Exception operationFailure, IndexShard primary, Logger logger, @Nullable Consumer postWriteAction ) { - super(request, finalResponse, operationFailure); + super(request, finalResponse); this.location = location; this.primary = primary; this.logger = logger; this.postWriteAction = postWriteAction; - assert location == null || operationFailure == null - : "expected either failure to be null or translog location to be null, " - + "but found: [" - + location - + "] translog location and [" - + operationFailure - + "] failure"; } @Override public void runPostReplicationActions(ActionListener listener) { - if (finalFailure != null) { - listener.onFailure(finalFailure); - } else { - /* - * We call this after replication because this might wait for a refresh and that can take a while. - * This way we wait for the refresh in parallel on the primary and on the replica. 
- */ - new AsyncAfterWriteAction(primary, replicaRequest, location, new RespondingWriteResult() { - @Override - public void onSuccess(boolean forcedRefresh) { - finalResponseIfSuccessful.setForcedRefresh(forcedRefresh); - listener.onResponse(null); - } + /* + * We call this after replication because this might wait for a refresh and that can take a while. + * This way we wait for the refresh in parallel on the primary and on the replica. + */ + new AsyncAfterWriteAction(primary, replicaRequest(), location, new RespondingWriteResult() { + @Override + public void onSuccess(boolean forcedRefresh) { + replicationResponse.setForcedRefresh(forcedRefresh); + listener.onResponse(null); + } - @Override - public void onFailure(Exception ex) { - listener.onFailure(ex); - } - }, logger, postWriteAction).run(); - } + @Override + public void onFailure(Exception ex) { + listener.onFailure(ex); + } + }, logger, postWriteAction).run(); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java index e56d1568d20e..30a9c92d7c04 100644 --- a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java @@ -92,7 +92,10 @@ protected void doExecute(Task task, TasksRequest request, ActionListener listener) { TasksRequest request = nodeTaskRequest.tasksRequest; List tasks = new ArrayList<>(); - processTasks(request, tasks::add); + processTasks(request, tasks::add, ActionListener.wrap(noop -> nodeOperation(task, listener, request, tasks), listener::onFailure)); + } + + private void nodeOperation(Task task, ActionListener listener, TasksRequest request, List tasks) { if (tasks.isEmpty()) { listener.onResponse(new NodeTasksResponse(clusterService.localNode().getId(), emptyList(), emptyList())); return; @@ -154,6 +157,11 @@ protected String[] resolveNodes(TasksRequest request, ClusterState clusterState) } } + protected void processTasks(TasksRequest request, Consumer operation, ActionListener nodeOperation) { + processTasks(request, operation); + nodeOperation.onResponse(null); + } + @SuppressWarnings("unchecked") protected void processTasks(TasksRequest request, Consumer operation) { if (request.getTargetTaskId().isSet()) { diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java index bfa07fa55d3f..150545f51b14 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.termvectors; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; @@ -41,7 +41,7 @@ public Failure(String index, String id, Exception cause) { public Failure(StreamInput in) throws IOException { index = in.readString(); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { // types no longer relevant so ignore String type = in.readOptionalString(); if (type != null) { @@ -76,7 +76,7 @@ public Exception 
getCause() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { // types not supported so send an empty array to previous versions out.writeOptionalString(null); } diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java index 6a77388bcd5f..fb5f1f6ee134 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.termvectors; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.RealtimeRequest; import org.elasticsearch.action.ValidateActions; @@ -128,7 +128,7 @@ public TermVectorsRequest() {} TermVectorsRequest(StreamInput in) throws IOException { super(in); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { // types no longer relevant so ignore in.readString(); } @@ -477,7 +477,7 @@ public ActionRequestValidationException validate() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { // types not supported so send an empty array to previous versions out.writeString("_doc"); } diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java index b30cb8a7a7f8..82174485d829 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java @@ -16,7 +16,7 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CharsRefBuilder; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.termvectors.TermVectorsRequest.Flag; import org.elasticsearch.common.bytes.BytesArray; @@ -91,7 +91,7 @@ public TermVectorsResponse(String index, String id) { TermVectorsResponse(StreamInput in) throws IOException { index = in.readString(); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { // types no longer relevant so ignore in.readString(); } @@ -109,7 +109,7 @@ public TermVectorsResponse(String index, String id) { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { // types not supported so send an empty array to previous versions out.writeString(MapperService.SINGLE_MAPPING_NAME); } diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 1c222590597b..6801bf2c0fd1 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ 
b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.update; import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -136,7 +136,7 @@ public UpdateRequest(StreamInput in) throws IOException { public UpdateRequest(@Nullable ShardId shardId, StreamInput in) throws IOException { super(shardId, in); waitForActiveShards = ActiveShardCount.readFrom(in); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { String type = in.readString(); assert MapperService.SINGLE_MAPPING_NAME.equals(type) : "Expected [_doc] but received [" + type + "]"; } @@ -159,7 +159,7 @@ public UpdateRequest(@Nullable ShardId shardId, StreamInput in) throws IOExcepti ifPrimaryTerm = in.readVLong(); detectNoop = in.readBoolean(); scriptedUpsert = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_7_10_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { requireAlias = in.readBoolean(); } else { requireAlias = false; @@ -860,7 +860,7 @@ public void writeThin(StreamOutput out) throws IOException { private void doWrite(StreamOutput out, boolean thin) throws IOException { waitForActiveShards.writeTo(out); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); @@ -905,7 +905,7 @@ private void doWrite(StreamOutput out, boolean thin) throws IOException { out.writeVLong(ifPrimaryTerm); out.writeBoolean(detectNoop); out.writeBoolean(scriptedUpsert); - if (out.getVersion().onOrAfter(Version.V_7_10_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { out.writeBoolean(requireAlias); } } diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index e56c53e16d28..a9ef2e68a676 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -11,14 +11,14 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.env.Environment; +import org.elasticsearch.node.NodeValidationException; import java.io.PrintStream; -import static org.elasticsearch.bootstrap.BootstrapInfo.USER_EXCEPTION_MARKER; - /** * A container for transient state during bootstrap of the Elasticsearch process. 
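// --- Illustrative aside, not part of the patch: the wire-version gate preserved by the ---
// --- Version -> TransportVersion migration in the request/response classes above.      ---
// A minimal sketch assuming only the calls visible in those hunks (getTransportVersion(),
// onOrAfter(), readBoolean()/writeBoolean()); the request class and its fields are hypothetical.
import org.elasticsearch.TransportVersion;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.transport.TransportRequest;

import java.io.IOException;

class ExampleVersionedRequest extends TransportRequest {
    private final String id;
    private final boolean requireAlias;

    ExampleVersionedRequest(StreamInput in) throws IOException {
        super(in);
        id = in.readString();
        // Older peers never wrote this field, so only read it when the stream is new enough.
        requireAlias = in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0) && in.readBoolean();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(id);
        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) {
            out.writeBoolean(requireAlias); // skipped entirely when talking to older peers
        }
    }
}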
*/ @@ -71,10 +71,10 @@ Environment environment() { return nodeEnv.get(); } - void exitWithUserException(int exitCode, Exception e) { - err.print(USER_EXCEPTION_MARKER); - err.println(e.getMessage()); - gracefullyExit(exitCode); + void exitWithNodeValidationException(NodeValidationException e) { + Logger logger = LogManager.getLogger(Elasticsearch.class); + logger.error("node validation exception\n{}", e.getMessage()); + gracefullyExit(ExitCodes.CONFIG); } void exitWithUnknownException(Throwable e) { diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java index 0399624609c4..b9610c689f92 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java @@ -25,6 +25,7 @@ import java.io.BufferedReader; import java.io.IOException; +import java.nio.ByteOrder; import java.nio.file.Files; import java.nio.file.Path; import java.security.AllPermission; @@ -205,6 +206,7 @@ static List checks() { checks.add(new EarlyAccessCheck()); checks.add(new AllPermissionCheck()); checks.add(new DiscoveryConfiguredCheck()); + checks.add(new ByteOrderCheck()); return Collections.unmodifiableList(checks); } @@ -702,4 +704,19 @@ public BootstrapCheckResult check(BootstrapContext context) { ); } } + + static class ByteOrderCheck implements BootstrapCheck { + + @Override + public BootstrapCheckResult check(BootstrapContext context) { + if (nativeByteOrder() != ByteOrder.LITTLE_ENDIAN) { + return BootstrapCheckResult.failure("Little-endian native byte order is required to run Elasticsearch"); + } + return BootstrapCheckResult.success(); + } + + ByteOrder nativeByteOrder() { + return ByteOrder.nativeOrder(); + } + } } diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java index 0e565f22ba76..740833dffc7c 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapInfo.java @@ -66,13 +66,6 @@ public static ConsoleLoader.Console getConsole() { */ public static final String UNTRUSTED_CODEBASE = "/untrusted"; - /** - * A non-printable character denoting a UserException has occurred. - * - * This is sent over stderr to the controlling CLI process. - */ - public static final char USER_EXCEPTION_MARKER = '\u0015'; - /** * A non-printable character denoting the server is ready to process requests. 
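// --- Illustrative aside, not part of the patch: the shape of a BootstrapCheck like the ---
// --- ByteOrderCheck added above.                                                        ---
// A hedged sketch assuming only the surface visible in that hunk: check(BootstrapContext) plus
// the BootstrapCheckResult.success()/failure(String) factories. The heap-size rule below is
// invented for illustration and is not an Elasticsearch check.
// (It would sit as a static nested class alongside ByteOrderCheck in BootstrapChecks.)
static class ExampleMinimumHeapCheck implements BootstrapCheck {
    private static final long MIN_HEAP_BYTES = 128L * 1024 * 1024; // hypothetical threshold

    @Override
    public BootstrapCheckResult check(BootstrapContext context) {
        long maxHeapBytes = Runtime.getRuntime().maxMemory();
        if (maxHeapBytes < MIN_HEAP_BYTES) {
            return BootstrapCheckResult.failure(
                "max heap [" + maxHeapBytes + "] bytes is below the example minimum [" + MIN_HEAP_BYTES + "]"
            );
        }
        return BootstrapCheckResult.success();
    }
}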
* diff --git a/server/src/main/java/org/elasticsearch/bootstrap/ConsoleLoader.java b/server/src/main/java/org/elasticsearch/bootstrap/ConsoleLoader.java index 8b0d914e2da3..955e4439d1f7 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/ConsoleLoader.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/ConsoleLoader.java @@ -53,9 +53,8 @@ static Supplier buildConsoleLoader(ClassLoader classLoader) { private static ClassLoader buildClassLoader(Environment env) { final Path libDir = env.libFile().resolve("tools").resolve("ansi-console"); - try { - final URL[] urls = Files.list(libDir) - .filter(each -> each.getFileName().toString().endsWith(".jar")) + try (var libDirFilesStream = Files.list(libDir)) { + final URL[] urls = libDirFilesStream.filter(each -> each.getFileName().toString().endsWith(".jar")) .map(ConsoleLoader::pathToURL) .toArray(URL[]::new); diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index 8d30feb41452..bfaf712d4872 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -16,12 +16,11 @@ import org.apache.lucene.util.StringHelper; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; -import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.filesystem.FileSystemNatives; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.common.network.IfConfig; -import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; @@ -39,6 +38,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.PrintStream; +import java.lang.invoke.MethodHandles; import java.nio.file.Files; import java.nio.file.Path; import java.security.Permission; @@ -66,7 +66,7 @@ public static void main(final String[] args) { initPhase2(bootstrap); initPhase3(bootstrap); } catch (NodeValidationException e) { - bootstrap.exitWithUserException(ExitCodes.CONFIG, e); + bootstrap.exitWithNodeValidationException(e); } catch (Throwable t) { bootstrap.exitWithUnknownException(t); } @@ -143,14 +143,9 @@ public void checkPermission(Permission perm) { */ private static void initPhase2(Bootstrap bootstrap) throws IOException { final ServerArgs args = bootstrap.args(); - final SecureSettings keystore; - try { - keystore = KeyStoreWrapper.bootstrap(args.configDir(), args::keystorePassword); - } catch (Exception e) { - throw new RuntimeException(e); - } - bootstrap.setSecureSettings(keystore); - Environment nodeEnv = createEnvironment(args.configDir(), args.nodeSettings(), keystore); + final SecureSettings secrets = args.secrets(); + bootstrap.setSecureSettings(secrets); + Environment nodeEnv = createEnvironment(args.configDir(), args.nodeSettings(), secrets); bootstrap.setEnvironment(nodeEnv); initPidFile(args.pidFile()); @@ -182,6 +177,13 @@ private static void initPhase2(Bootstrap bootstrap) throws IOException { // Log ifconfig output before SecurityManager is installed IfConfig.logIfNecessary(); + try { + // ReferenceDocs class does nontrivial static initialization which should always succeed but load it now (before SM) to be sure + 
MethodHandles.publicLookup().ensureInitialized(ReferenceDocs.class); + } catch (IllegalAccessException unexpected) { + throw new AssertionError(unexpected); + } + // install SM after natives, shutdown hooks, etc. org.elasticsearch.bootstrap.Security.configure( nodeEnv, diff --git a/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java b/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java index a8c732b6fa02..cefcd4bfb511 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java @@ -52,12 +52,19 @@ static boolean isFatalUncaught(Throwable e) { void onFatalUncaught(final String threadName, final Throwable t) { final String message = "fatal error in thread [" + threadName + "], exiting"; - logger.error(message, t); + logErrorMessage(t, message); } void onNonFatalUncaught(final String threadName, final Throwable t) { final String message = "uncaught exception in thread [" + threadName + "]"; - logger.error(message, t); + logErrorMessage(t, message); + } + + private void logErrorMessage(Throwable t, String message) { + AccessController.doPrivileged((PrivilegedAction) () -> { + logger.error(message, t); + return null; + }); } void halt(int status) { diff --git a/server/src/main/java/org/elasticsearch/bootstrap/ServerArgs.java b/server/src/main/java/org/elasticsearch/bootstrap/ServerArgs.java index c324370573fc..bc7f1928d170 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/ServerArgs.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/ServerArgs.java @@ -11,12 +11,13 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.SuppressForbidden; import java.io.IOException; +import java.lang.reflect.InvocationTargetException; import java.nio.file.Path; /** @@ -25,18 +26,13 @@ * @param daemonize {@code true} if Elasticsearch should run as a daemon process, or {@code false} otherwise * @param quiet {@code false} if Elasticsearch should print log output to the console, {@code true} otherwise * @param pidFile absolute path to a file Elasticsearch should write its process id to, or {@code null} if no pid file should be written - * @param keystorePassword the password for the Elasticsearch keystore + * @param secrets the provided secure settings implementation * @param nodeSettings the node settings read from {@code elasticsearch.yml}, the cli and the process environment * @param configDir the directory where {@code elasticsearch.yml} and other config exists */ -public record ServerArgs( - boolean daemonize, - boolean quiet, - Path pidFile, - SecureString keystorePassword, - Settings nodeSettings, - Path configDir -) implements Writeable { +public record ServerArgs(boolean daemonize, boolean quiet, Path pidFile, SecureSettings secrets, Settings nodeSettings, Path configDir) + implements + Writeable { /** * Arguments for running Elasticsearch. 
@@ -44,12 +40,13 @@ public record ServerArgs( * @param daemonize {@code true} if Elasticsearch should run as a daemon process, or {@code false} otherwise * @param quiet {@code false} if Elasticsearch should print log output to the console, {@code true} otherwise * @param pidFile absolute path to a file Elasticsearch should write its process id to, or {@code null} if no pid file should be written - * @param keystorePassword the password for the Elasticsearch keystore + * @param secrets the provided secure settings implementation * @param nodeSettings the node settings read from {@code elasticsearch.yml}, the cli and the process environment * @param configDir the directory where {@code elasticsearch.yml} and other config exists */ public ServerArgs { assert pidFile == null || pidFile.isAbsolute(); + assert secrets != null; } /** @@ -60,7 +57,7 @@ public ServerArgs(StreamInput in) throws IOException { in.readBoolean(), in.readBoolean(), readPidFile(in), - in.readSecureString(), + readSecureSettingsFromStream(in), Settings.readSettingsFromStream(in), resolvePath(in.readString()) ); @@ -81,8 +78,19 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(daemonize); out.writeBoolean(quiet); out.writeOptionalString(pidFile == null ? null : pidFile.toString()); - out.writeSecureString(keystorePassword); + out.writeString(secrets.getClass().getName()); + secrets.writeTo(out); nodeSettings.writeTo(out); out.writeString(configDir.toString()); } + + private static SecureSettings readSecureSettingsFromStream(StreamInput in) throws IOException { + String className = in.readString(); + try { + return (SecureSettings) Class.forName(className).getConstructor(StreamInput.class).newInstance(in); + } catch (NoSuchMethodException | ClassNotFoundException | InstantiationException | IllegalAccessException + | InvocationTargetException cfe) { + throw new IllegalArgumentException("Invalid secrets implementation [" + className + "]", cfe); + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/AbstractNamedDiffable.java b/server/src/main/java/org/elasticsearch/cluster/AbstractNamedDiffable.java index f330bb037c12..d2ccf3d6a2e0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/AbstractNamedDiffable.java +++ b/server/src/main/java/org/elasticsearch/cluster/AbstractNamedDiffable.java @@ -8,7 +8,7 @@ package org.elasticsearch.cluster; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -48,7 +48,7 @@ private static class CompleteNamedDiff> implements Na * is unnecessary. 
*/ @Nullable - private final Version minimalSupportedVersion; + private final TransportVersion minimalSupportedVersion; /** * Creates simple diff with changes @@ -62,7 +62,7 @@ private static class CompleteNamedDiff> implements Na /** * Creates simple diff without changes */ - CompleteNamedDiff(String name, Version minimalSupportedVersion) { + CompleteNamedDiff(String name, TransportVersion minimalSupportedVersion) { this.part = null; this.name = name; this.minimalSupportedVersion = minimalSupportedVersion; @@ -108,7 +108,7 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { + public TransportVersion getMinimalSupportedVersion() { assert minimalSupportedVersion != null : "shouldn't be called on the diff that was de-serialized from the stream"; return minimalSupportedVersion; } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java b/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java index bd26743690bb..c576389d72e2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java @@ -8,7 +8,7 @@ package org.elasticsearch.cluster; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; @@ -42,8 +42,8 @@ public class ClusterInfo implements ToXContentFragment, Writeable { public static final ClusterInfo EMPTY = new ClusterInfo(); - public static final Version DATA_SET_SIZE_SIZE_VERSION = Version.V_7_13_0; - public static final Version DATA_PATH_NEW_KEY_VERSION = Version.V_8_6_0; + public static final TransportVersion DATA_SET_SIZE_SIZE_VERSION = TransportVersion.V_7_13_0; + public static final TransportVersion DATA_PATH_NEW_KEY_VERSION = TransportVersion.V_8_6_0; private final Map leastAvailableSpaceUsage; private final Map mostAvailableSpaceUsage; @@ -76,9 +76,9 @@ public ClusterInfo( Map reservedSpace ) { this.leastAvailableSpaceUsage = Map.copyOf(leastAvailableSpaceUsage); + this.mostAvailableSpaceUsage = Map.copyOf(mostAvailableSpaceUsage); this.shardSizes = Map.copyOf(shardSizes); this.shardDataSetSizes = Map.copyOf(shardDataSetSizes); - this.mostAvailableSpaceUsage = Map.copyOf(mostAvailableSpaceUsage); this.dataPath = Map.copyOf(dataPath); this.reservedSpace = Map.copyOf(reservedSpace); } @@ -87,17 +87,17 @@ public ClusterInfo(StreamInput in) throws IOException { this.leastAvailableSpaceUsage = in.readImmutableMap(StreamInput::readString, DiskUsage::new); this.mostAvailableSpaceUsage = in.readImmutableMap(StreamInput::readString, DiskUsage::new); this.shardSizes = in.readImmutableMap(StreamInput::readString, StreamInput::readLong); - if (in.getVersion().onOrAfter(DATA_SET_SIZE_SIZE_VERSION)) { + if (in.getTransportVersion().onOrAfter(DATA_SET_SIZE_SIZE_VERSION)) { this.shardDataSetSizes = in.readImmutableMap(ShardId::new, StreamInput::readLong); } else { this.shardDataSetSizes = Map.of(); } - if (in.getVersion().onOrAfter(DATA_PATH_NEW_KEY_VERSION)) { + if (in.getTransportVersion().onOrAfter(DATA_PATH_NEW_KEY_VERSION)) { this.dataPath = in.readImmutableMap(NodeAndShard::new, StreamInput::readString); } else { this.dataPath = in.readImmutableMap(nested -> NodeAndShard.from(new ShardRouting(nested)), StreamInput::readString); } - if (in.getVersion().onOrAfter(StoreStats.RESERVED_BYTES_VERSION)) { + if 
(in.getTransportVersion().onOrAfter(StoreStats.RESERVED_BYTES_VERSION)) { this.reservedSpace = in.readImmutableMap(NodeAndPath::new, ReservedSpace::new); } else { this.reservedSpace = Map.of(); @@ -109,15 +109,15 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(this.leastAvailableSpaceUsage, StreamOutput::writeString, (o, v) -> v.writeTo(o)); out.writeMap(this.mostAvailableSpaceUsage, StreamOutput::writeString, (o, v) -> v.writeTo(o)); out.writeMap(this.shardSizes, StreamOutput::writeString, (o, v) -> o.writeLong(v == null ? -1 : v)); - if (out.getVersion().onOrAfter(DATA_SET_SIZE_SIZE_VERSION)) { + if (out.getTransportVersion().onOrAfter(DATA_SET_SIZE_SIZE_VERSION)) { out.writeMap(this.shardDataSetSizes, (o, s) -> s.writeTo(o), StreamOutput::writeLong); } - if (out.getVersion().onOrAfter(DATA_PATH_NEW_KEY_VERSION)) { + if (out.getTransportVersion().onOrAfter(DATA_PATH_NEW_KEY_VERSION)) { out.writeMap(this.dataPath, (o, k) -> k.writeTo(o), StreamOutput::writeString); } else { out.writeMap(this.dataPath, (o, k) -> createFakeShardRoutingFromNodeAndShard(k).writeTo(o), StreamOutput::writeString); } - if (out.getVersion().onOrAfter(StoreStats.RESERVED_BYTES_VERSION)) { + if (out.getTransportVersion().onOrAfter(StoreStats.RESERVED_BYTES_VERSION)) { out.writeMap(this.reservedSpace); } } @@ -133,7 +133,8 @@ private static ShardRouting createFakeShardRoutingFromNodeAndShard(NodeAndShard nodeAndShard.shardId, true, RecoverySource.EmptyStoreRecoverySource.INSTANCE, - new UnassignedInfo(REINITIALIZED, "fake") + new UnassignedInfo(REINITIALIZED, "fake"), + ShardRouting.Role.DEFAULT // ok, this is only used prior to DATA_PATH_NEW_KEY_VERSION which has no other roles ).initialize(nodeAndShard.nodeId, null, 0L).moveToStarted(0L); } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterInfoSimulator.java b/server/src/main/java/org/elasticsearch/cluster/ClusterInfoSimulator.java index 0485e06f6663..0420611b8edf 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterInfoSimulator.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterInfoSimulator.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.util.CopyOnFirstWriteMap; import org.elasticsearch.index.shard.ShardId; import java.util.HashMap; @@ -19,14 +20,14 @@ public class ClusterInfoSimulator { private final Map leastAvailableSpaceUsage; private final Map mostAvailableSpaceUsage; - private final Map shardSizes; + private final CopyOnFirstWriteMap shardSizes; private final Map shardDataSetSizes; private final Map dataPath; public ClusterInfoSimulator(ClusterInfo clusterInfo) { this.leastAvailableSpaceUsage = new HashMap<>(clusterInfo.getNodeLeastAvailableDiskUsages()); this.mostAvailableSpaceUsage = new HashMap<>(clusterInfo.getNodeMostAvailableDiskUsages()); - this.shardSizes = new HashMap<>(clusterInfo.shardSizes); + this.shardSizes = new CopyOnFirstWriteMap<>(clusterInfo.shardSizes); this.shardDataSetSizes = Map.copyOf(clusterInfo.shardDataSetSizes); this.dataPath = Map.copyOf(clusterInfo.dataPath); } @@ -101,6 +102,13 @@ private static long withinRange(long min, long max, long value) { } public ClusterInfo getClusterInfo() { - return new ClusterInfo(leastAvailableSpaceUsage, mostAvailableSpaceUsage, shardSizes, shardDataSetSizes, dataPath, Map.of()); + return new ClusterInfo( + leastAvailableSpaceUsage, + mostAvailableSpaceUsage, + shardSizes.toImmutableMap(), + shardDataSetSizes, + dataPath, + 
Map.of() + ); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index ffe8000480a2..d6e8c2ed8b9c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -25,7 +25,8 @@ import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; import org.elasticsearch.cluster.metadata.RepositoriesMetadata; import org.elasticsearch.cluster.routing.DelayedAllocationService; -import org.elasticsearch.cluster.routing.RerouteService; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingRoleStrategy; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; @@ -114,6 +115,7 @@ public class ClusterModule extends AbstractModule { // pkg private for tests final Collection deciderList; final ShardsAllocator shardsAllocator; + private final ShardRoutingRoleStrategy shardRoutingRoleStrategy; public ClusterModule( Settings settings, @@ -123,7 +125,6 @@ public ClusterModule( SnapshotsInfoService snapshotsInfoService, ThreadPool threadPool, SystemIndices systemIndices, - Supplier rerouteServiceSupplier, WriteLoadForecaster writeLoadForecaster ) { this.clusterPlugins = clusterPlugins; @@ -136,15 +137,44 @@ public ClusterModule( clusterPlugins, clusterService, this::reconcile, - writeLoadForecaster, - clusterInfoService + writeLoadForecaster ); this.clusterService = clusterService; this.indexNameExpressionResolver = new IndexNameExpressionResolver(threadPool.getThreadContext(), systemIndices); - this.allocationService = new AllocationService(allocationDeciders, shardsAllocator, clusterInfoService, snapshotsInfoService); + this.shardRoutingRoleStrategy = getShardRoutingRoleStrategy(clusterPlugins); + this.allocationService = new AllocationService( + allocationDeciders, + shardsAllocator, + clusterInfoService, + snapshotsInfoService, + shardRoutingRoleStrategy + ); this.metadataDeleteIndexService = new MetadataDeleteIndexService(settings, clusterService, allocationService); } + static ShardRoutingRoleStrategy getShardRoutingRoleStrategy(List clusterPlugins) { + final var strategies = clusterPlugins.stream().map(ClusterPlugin::getShardRoutingRoleStrategy).filter(Objects::nonNull).toList(); + return switch (strategies.size()) { + case 0 -> new ShardRoutingRoleStrategy() { + + // NOTE: this is deliberately an anonymous class to avoid any possibility of using this DEFAULT-only strategy when a plugin + // has injected a different strategy. 
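Illustrative aside (hypothetical, not part of this change): the switch above falls back to the anonymous DEFAULT-only strategy only when no ClusterPlugin supplies its own ShardRoutingRoleStrategy. A plugin that wanted dedicated search copies might register something along these lines; the class name and the use of non-DEFAULT role constants are illustrative assumptions, not code from this diff.

import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingRoleStrategy;

// Hypothetical plugin-supplied strategy: the first copy of a new shard remains an indexing copy,
// every additional copy and every newly created replica becomes a search-only copy.
public class SearchReplicasRoleStrategy implements ShardRoutingRoleStrategy {

    @Override
    public ShardRouting.Role newEmptyRole(int copyIndex) {
        return copyIndex == 0 ? ShardRouting.Role.INDEX_ONLY : ShardRouting.Role.SEARCH_ONLY;
    }

    @Override
    public ShardRouting.Role newReplicaRole() {
        return ShardRouting.Role.SEARCH_ONLY;
    }
}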
+ + @Override + public ShardRouting.Role newReplicaRole() { + return ShardRouting.Role.DEFAULT; + } + + @Override + public ShardRouting.Role newEmptyRole(int copyIndex) { + return ShardRouting.Role.DEFAULT; + } + }; + case 1 -> strategies.get(0); + default -> throw new IllegalArgumentException("multiple plugins define shard role strategies, which is not permitted"); + }; + } + private ClusterState reconcile(ClusterState clusterState, Consumer routingAllocationConsumer) { return allocationService.executeWithRoutingAllocation(clusterState, "reconcile-desired-balance", routingAllocationConsumer); } @@ -313,19 +343,19 @@ public static Collection createAllocationDeciders( addAllocationDecider(deciders, new ResizeAllocationDecider()); addAllocationDecider(deciders, new ReplicaAfterPrimaryActiveAllocationDecider()); addAllocationDecider(deciders, new RebalanceOnlyWhenActiveAllocationDecider()); - addAllocationDecider(deciders, new ClusterRebalanceAllocationDecider(settings, clusterSettings)); - addAllocationDecider(deciders, new ConcurrentRebalanceAllocationDecider(settings, clusterSettings)); - addAllocationDecider(deciders, new EnableAllocationDecider(settings, clusterSettings)); + addAllocationDecider(deciders, new ClusterRebalanceAllocationDecider(clusterSettings)); + addAllocationDecider(deciders, new ConcurrentRebalanceAllocationDecider(clusterSettings)); + addAllocationDecider(deciders, new EnableAllocationDecider(clusterSettings)); addAllocationDecider(deciders, new NodeVersionAllocationDecider()); addAllocationDecider(deciders, new SnapshotInProgressAllocationDecider()); addAllocationDecider(deciders, new RestoreInProgressAllocationDecider()); addAllocationDecider(deciders, new NodeShutdownAllocationDecider()); addAllocationDecider(deciders, new NodeReplacementAllocationDecider()); addAllocationDecider(deciders, new FilterAllocationDecider(settings, clusterSettings)); - addAllocationDecider(deciders, new SameShardAllocationDecider(settings, clusterSettings)); + addAllocationDecider(deciders, new SameShardAllocationDecider(clusterSettings)); addAllocationDecider(deciders, new DiskThresholdDecider(settings, clusterSettings)); - addAllocationDecider(deciders, new ThrottlingAllocationDecider(settings, clusterSettings)); - addAllocationDecider(deciders, new ShardsLimitAllocationDecider(settings, clusterSettings)); + addAllocationDecider(deciders, new ThrottlingAllocationDecider(clusterSettings)); + addAllocationDecider(deciders, new ShardsLimitAllocationDecider(clusterSettings)); addAllocationDecider(deciders, new AwarenessAllocationDecider(settings, clusterSettings)); clusterPlugins.stream() @@ -349,15 +379,15 @@ private static ShardsAllocator createShardsAllocator( List clusterPlugins, ClusterService clusterService, DesiredBalanceReconcilerAction reconciler, - WriteLoadForecaster writeLoadForecaster, - ClusterInfoService clusterInfoService + WriteLoadForecaster writeLoadForecaster ) { Map> allocators = new HashMap<>(); - allocators.put(BALANCED_ALLOCATOR, () -> new BalancedShardsAllocator(settings, clusterSettings, writeLoadForecaster)); + allocators.put(BALANCED_ALLOCATOR, () -> new BalancedShardsAllocator(clusterSettings, writeLoadForecaster)); allocators.put( DESIRED_BALANCE_ALLOCATOR, () -> new DesiredBalanceShardsAllocator( - new BalancedShardsAllocator(settings, clusterSettings, writeLoadForecaster), + clusterSettings, + new BalancedShardsAllocator(clusterSettings, writeLoadForecaster), threadPool, clusterService, reconciler @@ -405,6 +435,7 @@ protected void configure() { 
bind(TaskResultsService.class).asEagerSingleton(); bind(AllocationDeciders.class).toInstance(allocationDeciders); bind(ShardsAllocator.class).toInstance(shardsAllocator); + bind(ShardRoutingRoleStrategy.class).toInstance(shardRoutingRoleStrategy); } public void setExistingShardsAllocators(GatewayAllocator gatewayAllocator) { diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 3ece7e345ffd..165f2413f5f9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -8,7 +8,7 @@ package org.elasticsearch.cluster; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -21,12 +21,9 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.MasterService; @@ -35,6 +32,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -43,19 +41,23 @@ import org.elasticsearch.common.io.stream.VersionedNamedWriteable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.core.Nullable; -import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContent; -import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; +import java.util.Iterator; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.Executor; import java.util.function.Consumer; +import java.util.function.Function; import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; @@ -92,7 +94,7 @@ *
<p>
* Cluster state updates can be used to trigger various actions via a {@link ClusterStateListener} rather than using a timer. *
<p>
- * Implements {@link ToXContentFragment} to be exposed in REST APIs (e.g. {@code GET _cluster/state} and {@code POST _cluster/reroute}) and + * Implements {@link ChunkedToXContent} to be exposed in REST APIs (e.g. {@code GET _cluster/state} and {@code POST _cluster/reroute}) and * to be indexed by monitoring, mostly just for diagnostics purposes. The {@link XContent} representation does not need to be 100% faithful * since we never reconstruct a cluster state from its XContent representation, but the more faithful it is the more useful it is for * diagnostics. Note that the {@link XContent} representation of the {@link Metadata} portion does have to be faithful (in {@link @@ -101,11 +103,11 @@ * Security-sensitive data such as passwords or private keys should not be stored in the cluster state, since the contents of the cluster * state are exposed in various APIs. */ -public class ClusterState implements ToXContentFragment, Diffable { +public class ClusterState implements ChunkedToXContent, Diffable { public static final ClusterState EMPTY_STATE = builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build(); - public interface Custom extends NamedDiffable, ToXContentFragment { + public interface Custom extends NamedDiffable, ChunkedToXContent { /** * Returns true iff this {@link Custom} is private to the cluster and should never be send to a client. @@ -121,7 +123,7 @@ default boolean isPrivate() { * the more faithful it is the more useful it is for diagnostics. */ @Override - XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException; + Iterator toXContentChunked(ToXContent.Params params); } private static final NamedDiffableValueSerializer CUSTOM_VALUE_SERIALIZER = new NamedDiffableValueSerializer<>(Custom.class); @@ -517,114 +519,131 @@ public String toString() { } } + private static Iterator chunkedSection( + boolean condition, + ToXContent before, + Iterator items, + Function> fn, + ToXContent after + ) { + return condition + ? 
Iterators.concat(Iterators.single(before), Iterators.flatMap(items, fn::apply), Iterators.single(after)) + : Collections.emptyIterator(); + } + @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - EnumSet metrics = Metric.parseString(params.param("metric", "_all"), true); + public Iterator toXContentChunked(ToXContent.Params outerParams) { + final var metrics = Metric.parseString(outerParams.param("metric", "_all"), true); - // always provide the cluster_uuid as part of the top-level response (also part of the metadata response) - builder.field("cluster_uuid", metadata().clusterUUID()); + return Iterators.concat( - if (metrics.contains(Metric.VERSION)) { - builder.field("version", version); - builder.field("state_uuid", stateUUID); - } + // header chunk + Iterators.single(((builder, params) -> { + // always provide the cluster_uuid as part of the top-level response (also part of the metadata response) + builder.field("cluster_uuid", metadata().clusterUUID()); - if (metrics.contains(Metric.MASTER_NODE)) { - builder.field("master_node", nodes().getMasterNodeId()); - } - - if (metrics.contains(Metric.BLOCKS)) { - builder.startObject("blocks"); + // state version info + if (metrics.contains(Metric.VERSION)) { + builder.field("version", version); + builder.field("state_uuid", stateUUID); + } - if (blocks().global().isEmpty() == false) { - builder.startObject("global"); - for (ClusterBlock block : blocks().global()) { - block.toXContent(builder, params); + // master node + if (metrics.contains(Metric.MASTER_NODE)) { + builder.field("master_node", nodes().getMasterNodeId()); } - builder.endObject(); - } - if (blocks().indices().isEmpty() == false) { - builder.startObject("indices"); - for (Map.Entry> entry : blocks().indices().entrySet()) { - builder.startObject(entry.getKey()); - for (ClusterBlock block : entry.getValue()) { + return builder; + })), + + // blocks + chunkedSection(metrics.contains(Metric.BLOCKS), (builder, params) -> { + builder.startObject("blocks"); + if (blocks().global().isEmpty() == false) { + builder.startObject("global"); + for (ClusterBlock block : blocks().global()) { block.toXContent(builder, params); } builder.endObject(); } - builder.endObject(); - } - - builder.endObject(); - } - - // nodes - if (metrics.contains(Metric.NODES)) { - builder.startObject("nodes"); - for (DiscoveryNode node : nodes) { - node.toXContent(builder, params); - } - builder.endObject(); - } - - // meta data - if (metrics.contains(Metric.METADATA)) { - metadata.toXContent(builder, params); - } - - // routing table - if (metrics.contains(Metric.ROUTING_TABLE)) { - builder.startObject("routing_table"); - builder.startObject("indices"); - for (IndexRoutingTable indexRoutingTable : routingTable()) { - builder.startObject(indexRoutingTable.getIndex().getName()); - builder.startObject("shards"); - for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { - IndexShardRoutingTable indexShardRoutingTable = indexRoutingTable.shard(shardId); - builder.startArray(Integer.toString(indexShardRoutingTable.shardId().id())); - for (int copy = 0; copy < indexShardRoutingTable.size(); copy++) { - indexShardRoutingTable.shard(copy).toXContent(builder, params); - } - builder.endArray(); + if (blocks().indices().isEmpty() == false) { + builder.startObject("indices"); } - builder.endObject(); - builder.endObject(); - } - builder.endObject(); - builder.endObject(); - } - - // routing nodes - if (metrics.contains(Metric.ROUTING_NODES)) { - 
builder.startObject("routing_nodes"); - builder.startArray("unassigned"); - for (ShardRouting shardRouting : getRoutingNodes().unassigned()) { - shardRouting.toXContent(builder, params); - } - builder.endArray(); - - builder.startObject("nodes"); - for (RoutingNode routingNode : getRoutingNodes()) { - builder.startArray(routingNode.nodeId() == null ? "null" : routingNode.nodeId()); - for (ShardRouting shardRouting : routingNode) { - shardRouting.toXContent(builder, params); + return builder; + }, blocks.indices().entrySet().iterator(), entry -> Iterators.single((builder, params) -> { + builder.startObject(entry.getKey()); + for (ClusterBlock block : entry.getValue()) { + block.toXContent(builder, params); } - builder.endArray(); - } - builder.endObject(); - - builder.endObject(); - } - if (metrics.contains(Metric.CUSTOMS)) { - for (Map.Entry cursor : customs.entrySet()) { - builder.startObject(cursor.getKey()); - cursor.getValue().toXContent(builder, params); - builder.endObject(); - } - } - - return builder; + return builder.endObject(); + }), (builder, params) -> { + if (blocks().indices().isEmpty() == false) { + builder.endObject(); + } + return builder.endObject(); + }), + + // nodes + chunkedSection( + metrics.contains(Metric.NODES), + (builder, params) -> builder.startObject("nodes"), + nodes.iterator(), + Iterators::single, + (builder, params) -> builder.endObject() + ), + + // metadata + metrics.contains(Metric.METADATA) ? metadata.toXContentChunked(outerParams) : Collections.emptyIterator(), + + // routing table + chunkedSection( + metrics.contains(Metric.ROUTING_TABLE), + (builder, params) -> builder.startObject("routing_table").startObject("indices"), + routingTable().iterator(), + indexRoutingTable -> Iterators.single((builder, params) -> { + builder.startObject(indexRoutingTable.getIndex().getName()); + builder.startObject("shards"); + for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { + IndexShardRoutingTable indexShardRoutingTable = indexRoutingTable.shard(shardId); + builder.startArray(Integer.toString(indexShardRoutingTable.shardId().id())); + for (int copy = 0; copy < indexShardRoutingTable.size(); copy++) { + indexShardRoutingTable.shard(copy).toXContent(builder, params); + } + builder.endArray(); + } + return builder.endObject().endObject(); + }), + (builder, params) -> builder.endObject().endObject() + ), + + // routing nodes + chunkedSection( + metrics.contains(Metric.ROUTING_NODES), + (builder, params) -> builder.startObject("routing_nodes").startArray("unassigned"), + getRoutingNodes().unassigned().iterator(), + Iterators::single, + (builder, params) -> builder.endArray() // no endObject() here, continued in next chunkedSection() + ), + chunkedSection( + metrics.contains(Metric.ROUTING_NODES), + (builder, params) -> builder.startObject("nodes"), + getRoutingNodes().iterator(), + routingNode -> Iterators.concat( + ChunkedToXContentHelper.startArray(routingNode.nodeId() == null ? "null" : routingNode.nodeId()), + routingNode.iterator(), + ChunkedToXContentHelper.endArray() + ), + (builder, params) -> builder.endObject().endObject() + ), + + // customs + metrics.contains(Metric.CUSTOMS) + ? 
Iterators.flatMap( + customs.entrySet().iterator(), + cursor -> ChunkedToXContentHelper.wrapWithObject(cursor.getKey(), cursor.getValue().toXContentChunked(outerParams)) + ) + : Collections.emptyIterator() + ); } public static Builder builder(ClusterName clusterName) { @@ -822,7 +841,7 @@ public static ClusterState readFrom(StreamInput in, DiscoveryNode localNode) thr Custom customIndexMetadata = in.readNamedWriteable(Custom.class); builder.putCustom(customIndexMetadata.getWriteableName(), customIndexMetadata); } - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { in.readVInt(); // used to be minimumMasterNodesOnPublishingMaster, which was used in 7.x for BWC with 6.x } return builder.build(); @@ -838,7 +857,7 @@ public void writeTo(StreamOutput out) throws IOException { nodes.writeTo(out); blocks.writeTo(out); VersionedNamedWriteable.writeVersionedWritables(out, customs); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeVInt(-1); // used to be minimumMasterNodesOnPublishingMaster, which was used in 7.x for BWC with 6.x } } @@ -885,7 +904,7 @@ private static class ClusterStateDiff implements Diff { metadata = Metadata.readDiffFrom(in); blocks = ClusterBlocks.readDiffFrom(in); customs = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { in.readVInt(); // used to be minimumMasterNodesOnPublishingMaster, which was used in 7.x for BWC with 6.x } } @@ -901,7 +920,7 @@ public void writeTo(StreamOutput out) throws IOException { metadata.writeTo(out); blocks.writeTo(out); customs.writeTo(out); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeVInt(-1); // used to be minimumMasterNodesOnPublishingMaster, which was used in 7.x for BWC with 6.x } } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java index 02192b01991c..27de00e87966 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java @@ -21,7 +21,8 @@ */ public interface ClusterStateTaskExecutor { /** - * Update the cluster state based on the current state and the given tasks. Return the *same instance* if no update should be published. + * Update the cluster state based on the current state and the given tasks. Return {@code batchExecutionContext.initialState()} to avoid + * publishing any update. *
<p>
* If this method throws an exception then the cluster state is unchanged and every task's {@link ClusterStateTaskListener#onFailure} * method is called. @@ -30,8 +31,15 @@ public interface ClusterStateTaskExecutor { * This works ok but beware that constructing a whole new {@link ClusterState} can be somewhat expensive, and there may sometimes be * surprisingly many tasks to process in the batch. If it's possible to accumulate the effects of the tasks at a lower level then you * should do that instead. + *
<p>
+ * Returning {@code batchExecutionContext.initialState()} is an important and useful optimisation in most cases, but note that this + * fast-path exposes APIs to the risk of stale reads in the vicinity of a master failover: a node {@code N} that handles such a no-op + * task batch does not verify with its peers that it's still the master, and if it's not the master then another node {@code M} may + * already have become master and updated the state in a way that would be inconsistent with the response that {@code N} sends back to + * clients. * - * @return The resulting cluster state after executing all the tasks. If {code initialState} is returned then no update is published. + * @return The resulting cluster state after executing all the tasks. If {code batchExecutionContext.initialState()} is returned then no + * update is published. */ ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception; diff --git a/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java b/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java index fc23db6015fa..2bd7f971c9a5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java +++ b/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java @@ -8,7 +8,7 @@ package org.elasticsearch.cluster; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -371,7 +371,7 @@ public List> getUpserts() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeCollection(deletes, (o, v) -> keySerializer.writeKey(v, o)); - Version version = out.getVersion(); + TransportVersion version = out.getTransportVersion(); // filter out custom states not supported by the other node int diffCount = 0; for (Map.Entry> diff : diffs) { @@ -499,14 +499,14 @@ public interface ValueSerializer { /** * Whether this serializer supports the version of the output stream */ - default boolean supportsVersion(Diff value, Version version) { + default boolean supportsVersion(Diff value, TransportVersion version) { return true; } /** * Whether this serializer supports the version of the output stream */ - default boolean supportsVersion(V value, Version version) { + default boolean supportsVersion(V value, TransportVersion version) { return true; } diff --git a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index c36ee576923a..40f3f344c46d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -20,6 +20,7 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -32,7 +33,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.CountDown; import 
org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.StoreStats; @@ -161,7 +161,7 @@ public void clusterChanged(ClusterChangedEvent event) { private class AsyncRefresh { private final List> thisRefreshListeners; - private final CountDown countDown = new CountDown(2); + private final RefCountingRunnable fetchRefs = new RefCountingRunnable(this::callListeners); AsyncRefresh(List> thisRefreshListeners) { this.thisRefreshListeners = thisRefreshListeners; @@ -177,15 +177,15 @@ void execute() { return; } - assert countDown.isCountedDown() == false; logger.trace("starting async refresh"); - try (var ignored = threadPool.getThreadContext().clearTraceContext()) { - fetchNodeStats(); - } - - try (var ignored = threadPool.getThreadContext().clearTraceContext()) { - fetchIndicesStats(); + try (var ignoredRefs = fetchRefs) { + try (var ignored = threadPool.getThreadContext().clearTraceContext()) { + fetchNodeStats(); + } + try (var ignored = threadPool.getThreadContext().clearTraceContext()) { + fetchIndicesStats(); + } } } @@ -200,10 +200,8 @@ private void fetchIndicesStats() { .stats( indicesStatsRequest, new ThreadedActionListener<>( - logger, - threadPool, - ThreadPool.Names.MANAGEMENT, - ActionListener.runAfter(new ActionListener<>() { + threadPool.executor(ThreadPool.Names.MANAGEMENT), + ActionListener.releaseAfter(new ActionListener<>() { @Override public void onResponse(IndicesStatsResponse indicesStatsResponse) { logger.trace("received indices stats response"); @@ -277,8 +275,7 @@ public void onFailure(Exception e) { } indicesStatsSummary = IndicesStatsSummary.EMPTY; } - }, this::onStatsProcessed), - false + }, fetchRefs.acquire()) ) ); } @@ -288,7 +285,7 @@ private void fetchNodeStats() { nodesStatsRequest.clear(); nodesStatsRequest.addMetric(NodesStatsRequest.Metric.FS.metricName()); nodesStatsRequest.timeout(fetchTimeout); - client.admin().cluster().nodesStats(nodesStatsRequest, ActionListener.runAfter(new ActionListener<>() { + client.admin().cluster().nodesStats(nodesStatsRequest, ActionListener.releaseAfter(new ActionListener<>() { @Override public void onResponse(NodesStatsResponse nodesStatsResponse) { logger.trace("received node stats response"); @@ -318,18 +315,12 @@ public void onFailure(Exception e) { leastAvailableSpaceUsages = Map.of(); mostAvailableSpaceUsages = Map.of(); } - }, this::onStatsProcessed)); - } - - private void onStatsProcessed() { - if (countDown.countDown()) { - logger.trace("stats all received, computing cluster info and notifying listeners"); - callListeners(); - } + }, fetchRefs.acquire())); } private void callListeners() { try { + logger.trace("stats all received, computing cluster info and notifying listeners"); final ClusterInfo clusterInfo = getClusterInfo(); boolean anyListeners = false; for (final Consumer listener : listeners) { diff --git a/server/src/main/java/org/elasticsearch/cluster/NamedDiff.java b/server/src/main/java/org/elasticsearch/cluster/NamedDiff.java index 598f43112f23..bda313b8bb6f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/NamedDiff.java +++ b/server/src/main/java/org/elasticsearch/cluster/NamedDiff.java @@ -8,7 +8,7 @@ package org.elasticsearch.cluster; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.NamedWriteable; /** @@ -18,6 +18,6 @@ public interface NamedDiff> extends Diff, NamedWriteabl /** * The minimal version of the recipient this custom object can be sent to */ - Version 
getMinimalSupportedVersion(); + TransportVersion getMinimalSupportedVersion(); } diff --git a/server/src/main/java/org/elasticsearch/cluster/NamedDiffableValueSerializer.java b/server/src/main/java/org/elasticsearch/cluster/NamedDiffableValueSerializer.java index 50c1d068aadf..d77afcd86a19 100644 --- a/server/src/main/java/org/elasticsearch/cluster/NamedDiffableValueSerializer.java +++ b/server/src/main/java/org/elasticsearch/cluster/NamedDiffableValueSerializer.java @@ -8,7 +8,7 @@ package org.elasticsearch.cluster; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; @@ -30,12 +30,12 @@ public T read(StreamInput in, String key) throws IOException { } @Override - public boolean supportsVersion(Diff value, Version version) { + public boolean supportsVersion(Diff value, TransportVersion version) { return version.onOrAfter(((NamedDiff) value).getMinimalSupportedVersion()); } @Override - public boolean supportsVersion(T value, Version version) { + public boolean supportsVersion(T value, TransportVersion version) { return version.onOrAfter(value.getMinimalSupportedVersion()); } diff --git a/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java index f88ada956582..8809a883bd97 100644 --- a/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java @@ -11,7 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.GroupedActionListener; +import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.cluster.coordination.FollowersChecker; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -28,7 +28,6 @@ import org.elasticsearch.transport.TransportService; import java.util.ArrayList; -import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -97,31 +96,25 @@ public void connectToNodes(DiscoveryNodes discoveryNodes, Runnable onCompletion) return; } - final GroupedActionListener listener = new GroupedActionListener<>( - ActionListener.wrap(onCompletion), - discoveryNodes.getSize() - ); - final List runnables = new ArrayList<>(discoveryNodes.getSize()); - synchronized (mutex) { - for (final DiscoveryNode discoveryNode : discoveryNodes) { - ConnectionTarget connectionTarget = targetsByNode.get(discoveryNode); - final boolean isNewNode = connectionTarget == null; - if (isNewNode) { - connectionTarget = new ConnectionTarget(discoveryNode); - targetsByNode.put(discoveryNode, connectionTarget); - } + try (var refs = new RefCountingRunnable(onCompletion)) { + synchronized (mutex) { + for (final DiscoveryNode discoveryNode : discoveryNodes) { + ConnectionTarget connectionTarget = targetsByNode.get(discoveryNode); + final boolean isNewNode = connectionTarget == null; + if (isNewNode) { + connectionTarget = new ConnectionTarget(discoveryNode); + targetsByNode.put(discoveryNode, connectionTarget); + } - if (isNewNode) { - logger.debug("connecting to {}", discoveryNode); - runnables.add( - connectionTarget.connect(ActionListener.runAfter(listener, () -> logger.debug("connected to {}", discoveryNode))) - ); - } else { - // known node, try and ensure it's connected 
but do not wait - logger.trace("checking connection to existing node [{}]", discoveryNode); - runnables.add(connectionTarget.connect(null)); - runnables.add(() -> listener.onResponse(null)); + if (isNewNode) { + logger.debug("connecting to {}", discoveryNode); + runnables.add(connectionTarget.connect(refs.acquire())); + } else { + // known node, try and ensure it's connected but do not wait + logger.trace("checking connection to existing node [{}]", discoveryNode); + runnables.add(connectionTarget.connect(null)); + } } } } @@ -153,18 +146,11 @@ public void disconnectFromNodesExcept(DiscoveryNodes discoveryNodes) { */ void ensureConnections(Runnable onCompletion) { final List runnables = new ArrayList<>(); - synchronized (mutex) { - final Collection connectionTargets = targetsByNode.values(); - if (connectionTargets.isEmpty()) { - runnables.add(onCompletion); - } else { + try (var refs = new RefCountingRunnable(onCompletion)) { + synchronized (mutex) { logger.trace("ensureConnections: {}", targetsByNode); - final GroupedActionListener listener = new GroupedActionListener<>( - ActionListener.wrap(onCompletion), - connectionTargets.size() - ); - for (final ConnectionTarget connectionTarget : connectionTargets) { - runnables.add(connectionTarget.connect(listener)); + for (ConnectionTarget connectionTarget : targetsByNode.values()) { + runnables.add(connectionTarget.connect(refs.acquire())); } } } @@ -225,6 +211,13 @@ private class ConnectionTarget { private final AtomicInteger consecutiveFailureCount = new AtomicInteger(); private final AtomicReference connectionRef = new AtomicReference<>(); + // all access to these fields is synchronized + private List pendingRefs; + private boolean connectionInProgress; + + // placeholder listener for a fire-and-forget connection attempt + private static final List NOOP = List.of(); + ConnectionTarget(DiscoveryNode discoveryNode) { this.discoveryNode = discoveryNode; } @@ -233,59 +226,97 @@ private void setConnectionRef(Releasable connectionReleasable) { Releasables.close(connectionRef.getAndSet(connectionReleasable)); } - Runnable connect(ActionListener listener) { + Runnable connect(Releasable onCompletion) { return () -> { - final boolean alreadyConnected = transportService.nodeConnected(discoveryNode); + registerRef(onCompletion); + doConnect(); + }; + } + + private synchronized void registerRef(Releasable ref) { + if (ref == null) { + pendingRefs = pendingRefs == null ? NOOP : pendingRefs; + return; + } + + if (pendingRefs == null || pendingRefs == NOOP) { + pendingRefs = new ArrayList<>(); + } + pendingRefs.add(ref); + } - if (alreadyConnected) { - logger.trace("refreshing connection to {}", discoveryNode); - } else { - logger.debug("connecting to {}", discoveryNode); + private synchronized Releasable acquireRefs() { + // Avoid concurrent connection attempts because they don't necessarily complete in order otherwise, and out-of-order completion + // might mean we end up disconnected from a node even though we triggered a call to connect() after all close() calls had + // finished. 
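Illustrative aside: both this class and InternalClusterInfoService above now lean on RefCountingRunnable to run a single completion action exactly once after an arbitrary number of asynchronous sub-tasks have finished, replacing the previous CountDown and GroupedActionListener plumbing. A minimal, self-contained sketch of that pattern, with a plain Executor and a made-up task list standing in for the real stats and transport calls:

import org.elasticsearch.action.support.RefCountingRunnable;
import org.elasticsearch.core.Releasable;

import java.util.List;
import java.util.concurrent.Executor;

class RefCountingSketch {
    // Fan out the sub-tasks and run onCompletion once they have all finished, in whatever order they complete.
    static void runAll(Executor executor, List<Runnable> subTasks, Runnable onCompletion) {
        try (var refs = new RefCountingRunnable(onCompletion)) {
            for (Runnable subTask : subTasks) {
                Releasable ref = refs.acquire(); // one ref per in-flight sub-task
                executor.execute(() -> {
                    try {
                        subTask.run();
                    } finally {
                        ref.close(); // releasing the last outstanding ref triggers onCompletion, exactly once
                    }
                });
            }
        } // the ref implicitly held by the try-with-resources block is released here
    }
}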
+ if (connectionInProgress == false) { + var refs = pendingRefs; + if (refs != null) { + pendingRefs = null; + connectionInProgress = true; + return Releasables.wrap(refs); } + } + return null; + } + + private synchronized void releaseListener() { + assert connectionInProgress; + connectionInProgress = false; + } + + private void doConnect() { + // noinspection resource + var refs = acquireRefs(); + if (refs == null) { + return; + } + + final boolean alreadyConnected = transportService.nodeConnected(discoveryNode); - // It's possible that connectionRef is a reference to an older connection that closed out from under us, but that something - // else has opened a fresh connection to the node. Therefore we always call connectToNode() and update connectionRef. - transportService.connectToNode(discoveryNode, new ActionListener<>() { - @Override - public void onResponse(Releasable connectionReleasable) { - if (alreadyConnected) { - logger.trace("refreshed connection to {}", discoveryNode); - } else { - logger.debug("connected to {}", discoveryNode); - } - consecutiveFailureCount.set(0); - setConnectionRef(connectionReleasable); - - final boolean isActive; - synchronized (mutex) { - isActive = targetsByNode.get(discoveryNode) == ConnectionTarget.this; - } - if (isActive == false) { - logger.debug("connected to stale {} - releasing stale connection", discoveryNode); - setConnectionRef(null); - } - if (listener != null) { - listener.onResponse(null); - } + if (alreadyConnected) { + logger.trace("refreshing connection to {}", discoveryNode); + } else { + logger.debug("connecting to {}", discoveryNode); + } + + // It's possible that connectionRef is a reference to an older connection that closed out from under us, but that something else + // has opened a fresh connection to the node. Therefore we always call connectToNode() and update connectionRef. + transportService.connectToNode(discoveryNode, ActionListener.runAfter(new ActionListener<>() { + @Override + public void onResponse(Releasable connectionReleasable) { + if (alreadyConnected) { + logger.trace("refreshed connection to {}", discoveryNode); + } else { + logger.debug("connected to {}", discoveryNode); } + consecutiveFailureCount.set(0); + setConnectionRef(connectionReleasable); - @Override - public void onFailure(Exception e) { - final int currentFailureCount = consecutiveFailureCount.incrementAndGet(); - // only warn every 6th failure - final Level level = currentFailureCount % 6 == 1 ? Level.WARN : Level.DEBUG; - logger.log( - level, - () -> format("failed to connect to %s (tried [%s] times)", discoveryNode, currentFailureCount), - e - ); + final boolean isActive; + synchronized (mutex) { + isActive = targetsByNode.get(discoveryNode) == ConnectionTarget.this; + } + if (isActive == false) { + logger.debug("connected to stale {} - releasing stale connection", discoveryNode); setConnectionRef(null); - if (listener != null) { - listener.onFailure(e); - } } - }); - }; + Releasables.closeExpectNoException(refs); + } + + @Override + public void onFailure(Exception e) { + final int currentFailureCount = consecutiveFailureCount.incrementAndGet(); + // only warn every 6th failure + final Level level = currentFailureCount % 6 == 1 ? 
Level.WARN : Level.DEBUG; + logger.log(level, () -> format("failed to connect to %s (tried [%s] times)", discoveryNode, currentFailureCount), e); + setConnectionRef(null); + Releasables.closeExpectNoException(refs); + } + }, () -> { + releaseListener(); + transportService.getThreadPool().generic().execute(this::doConnect); + })); } void disconnect() { diff --git a/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java b/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java index 948e055e06ee..40f4d390059b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java @@ -7,15 +7,17 @@ */ package org.elasticsearch.cluster; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.repositories.RepositoryOperation; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; +import java.util.Iterator; import java.util.List; public final class RepositoryCleanupInProgress extends AbstractNamedDiffable implements ClusterState.Custom { @@ -62,17 +64,17 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startArray(TYPE); - for (Entry entry : entries) { - builder.startObject(); - { + public Iterator toXContentChunked(ToXContent.Params ignored) { + return Iterators.concat( + Iterators.single((builder, params) -> builder.startArray(TYPE)), + entries.stream().map(entry -> (builder, params) -> { + builder.startObject(); builder.field("repository", entry.repository); - } - builder.endObject(); - } - builder.endArray(); - return builder; + builder.endObject(); + return builder; + }).iterator(), + Iterators.single((builder, params) -> builder.endArray()) + ); } @Override @@ -81,8 +83,8 @@ public String toString() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_7_4_0; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.V_7_4_0; } public static final class Entry implements Writeable, RepositoryOperation { diff --git a/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java b/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java index 3b19fee0210b..4d41473b2d0c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java @@ -8,9 +8,10 @@ package org.elasticsearch.cluster; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.elasticsearch.cluster.ClusterState.Custom; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -18,7 +19,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.xcontent.ToXContent; -import 
org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.Collections; @@ -344,8 +344,8 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT.minimumCompatibilityVersion(); + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.CURRENT.minimumCompatibilityVersion(); } public static NamedDiff readDiffFrom(StreamInput in) throws IOException { @@ -361,7 +361,7 @@ public RestoreInProgress(StreamInput in) throws IOException { Snapshot snapshot = new Snapshot(in); State state = State.fromValue(in.readByte()); boolean quiet; - if (in.getVersion().onOrAfter(RestoreSnapshotRequest.VERSION_SUPPORTING_QUIET_PARAMETER)) { + if (in.getTransportVersion().onOrAfter(RestoreSnapshotRequest.VERSION_SUPPORTING_QUIET_PARAMETER)) { quiet = in.readBoolean(); } else { // Backwards compatibility: previously there was no logging of the start or completion of a snapshot restore @@ -389,7 +389,7 @@ public void writeTo(StreamOutput out) throws IOException { o.writeString(entry.uuid); entry.snapshot().writeTo(o); o.writeByte(entry.state().value()); - if (out.getVersion().onOrAfter(RestoreSnapshotRequest.VERSION_SUPPORTING_QUIET_PARAMETER)) { + if (out.getTransportVersion().onOrAfter(RestoreSnapshotRequest.VERSION_SUPPORTING_QUIET_PARAMETER)) { o.writeBoolean(entry.quiet()); } o.writeStringCollection(entry.indices); @@ -398,49 +398,41 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startArray("snapshots"); - for (Entry entry : entries.values()) { - toXContent(entry, builder); - } - builder.endArray(); - return builder; - } - - /** - * Serializes single restore operation - * - * @param entry restore operation metadata - * @param builder XContent builder - */ - public static void toXContent(Entry entry, XContentBuilder builder) throws IOException { - builder.startObject(); - builder.field("snapshot", entry.snapshot().getSnapshotId().getName()); - builder.field("repository", entry.snapshot().getRepository()); - builder.field("state", entry.state()); - builder.startArray("indices"); - { - for (String index : entry.indices()) { - builder.value(index); - } - } - builder.endArray(); - builder.startArray("shards"); - { - for (Map.Entry shardEntry : entry.shards.entrySet()) { - ShardId shardId = shardEntry.getKey(); - ShardRestoreStatus status = shardEntry.getValue(); + public Iterator toXContentChunked(ToXContent.Params ignored) { + return Iterators.concat( + Iterators.single((builder, params) -> builder.startArray("snapshots")), + entries.values().stream().map(entry -> (builder, params) -> { builder.startObject(); + builder.field("snapshot", entry.snapshot().getSnapshotId().getName()); + builder.field("repository", entry.snapshot().getRepository()); + builder.field("state", entry.state()); + builder.startArray("indices"); { - builder.field("index", shardId.getIndex()); - builder.field("shard", shardId.getId()); - builder.field("state", status.state()); + for (String index : entry.indices()) { + builder.value(index); + } + } + builder.endArray(); + builder.startArray("shards"); + { + for (Map.Entry shardEntry : entry.shards.entrySet()) { + ShardId shardId = shardEntry.getKey(); + ShardRestoreStatus status = shardEntry.getValue(); + builder.startObject(); + { + builder.field("index", shardId.getIndex()); + builder.field("shard", shardId.getId()); + 
builder.field("state", status.state()); + } + builder.endObject(); + } } - builder.endObject(); - } - } - builder.endArray(); - builder.endObject(); + builder.endArray(); + builder.endObject(); + return builder; + }).iterator(), + Iterators.single((builder, params) -> builder.endArray()) + ); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java index ba9531fbe297..4f38256590bc 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java @@ -8,22 +8,24 @@ package org.elasticsearch.cluster; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.ClusterState.Custom; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.repositories.RepositoryOperation; import org.elasticsearch.snapshots.SnapshotId; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Objects; import java.util.Set; @@ -155,30 +157,32 @@ public static NamedDiff readDiffFrom(StreamInput in) throws IOException } @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT.minimumCompatibilityVersion(); + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.CURRENT.minimumCompatibilityVersion(); } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startArray(TYPE); - for (Entry entry : entries) { - builder.startObject(); - { - builder.field("repository", entry.repository()); - builder.startArray("snapshots"); - for (SnapshotId snapshot : entry.snapshots) { - builder.value(snapshot.getName()); + public Iterator toXContentChunked(ToXContent.Params ignored) { + return Iterators.concat( + Iterators.single((builder, params) -> builder.startArray(TYPE)), + entries.stream().map(entry -> (builder, params) -> { + builder.startObject(); + { + builder.field("repository", entry.repository()); + builder.startArray("snapshots"); + for (SnapshotId snapshot : entry.snapshots) { + builder.value(snapshot.getName()); + } + builder.endArray(); + builder.timeField("start_time_millis", "start_time", entry.startTime); + builder.field("repository_state_id", entry.repositoryStateId); + builder.field("state", entry.state); } - builder.endArray(); - builder.timeField("start_time_millis", "start_time", entry.startTime); - builder.field("repository_state_id", entry.repositoryStateId); - builder.field("state", entry.state); - } - builder.endObject(); - } - builder.endArray(); - return builder; + builder.endObject(); + return builder; + }).iterator(), + Iterators.single((builder, params) -> builder.endArray()) + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 279e7ba4a6b9..e9736fcff52a 100644 --- 
a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -8,10 +8,12 @@ package org.elasticsearch.cluster; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState.Custom; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -184,14 +186,14 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT.minimumCompatibilityVersion(); + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.CURRENT.minimumCompatibilityVersion(); } - private static final Version DIFFABLE_VERSION = Version.V_8_5_0; + private static final TransportVersion DIFFABLE_VERSION = TransportVersion.V_8_5_0; public static NamedDiff readDiffFrom(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(DIFFABLE_VERSION)) { + if (in.getTransportVersion().onOrAfter(DIFFABLE_VERSION)) { return new SnapshotInProgressDiff(in); } return readDiffFrom(Custom.class, TYPE, in); @@ -212,14 +214,12 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startArray("snapshots"); - final Iterator iterator = asStream().iterator(); - while (iterator.hasNext()) { - iterator.next().toXContent(builder, params); - } - builder.endArray(); - return builder; + public Iterator toXContentChunked(ToXContent.Params ignored) { + return Iterators.concat( + Iterators.single((builder, params) -> builder.startArray("snapshots")), + asStream().iterator(), + Iterators.single((builder, params) -> builder.endArray()) + ); } @Override @@ -1613,8 +1613,8 @@ public SnapshotsInProgress apply(Custom part) { } @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT.minimumCompatibilityVersion(); + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.CURRENT.minimumCompatibilityVersion(); } @Override @@ -1625,7 +1625,7 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { assert after != null : "should only write instances that were diffed from this node's state"; - if (out.getVersion().onOrAfter(DIFFABLE_VERSION)) { + if (out.getTransportVersion().onOrAfter(DIFFABLE_VERSION)) { mapDiff.writeTo(out); } else { new SimpleDiffable.CompleteDiff<>(after).writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CleanableResponseHandler.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CleanableResponseHandler.java new file mode 100644 index 000000000000..ff1dafb99c4f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CleanableResponseHandler.java @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.coordination; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportResponse; + +/** + * Combines an ActionListenerResponseHandler with an ActionListener.runAfter action, but with an explicit type so that tests that simulate + * reboots can release resources without invoking the listener. + */ +public class CleanableResponseHandler extends ActionListenerResponseHandler { + private final Runnable cleanup; + + public CleanableResponseHandler(ActionListener listener, Writeable.Reader reader, String executor, Runnable cleanup) { + super(ActionListener.runAfter(listener, cleanup), reader, executor); + this.cleanup = cleanup; + } + + public void runCleanup() { + assert ThreadPool.assertCurrentThreadPool(); // should only be called from tests which simulate abrupt node restarts + cleanup.run(); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java index 9e1772c7d844..028275ab1fa3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -129,14 +130,9 @@ void logBootstrapState(Metadata metadata) { if (bootstrapRequirements.isEmpty()) { logger.info("this node is locked into cluster UUID [{}] and will not attempt further cluster bootstrapping", clusterUUID); } else { - logger.warn( - """ - this node is locked into cluster UUID [{}] but [{}] is set to {}; \ - remove this setting to avoid possible data loss caused by subsequent cluster bootstrap attempts""", - clusterUUID, - INITIAL_MASTER_NODES_SETTING.getKey(), - bootstrapRequirements - ); + transportService.getThreadPool() + .scheduleWithFixedDelay(() -> logRemovalWarning(clusterUUID), TimeValue.timeValueHours(12), Names.SAME); + logRemovalWarning(clusterUUID); } } else { logger.info( @@ -147,6 +143,19 @@ void logBootstrapState(Metadata metadata) { } } + private void logRemovalWarning(String clusterUUID) { + logger.warn( + """ + this node is locked into cluster UUID [{}] but [{}] is set to {}; \ + remove this setting to avoid possible data loss caused by subsequent cluster bootstrap attempts; \ + for further information see {}""", + clusterUUID, + INITIAL_MASTER_NODES_SETTING.getKey(), + bootstrapRequirements, + ReferenceDocs.INITIAL_MASTER_NODES + ); + } + @Override public void onFoundPeersUpdated() { final Set nodes = getDiscoveredNodes(); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java index 11488584b7ed..be82c8955a18 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.coordination.CoordinationState.VoteCollection; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -100,7 +101,11 @@ public void onFailure(Exception e) { protected void doRun() { if (isActive()) { logLastFailedJoinAttempt.run(); - logger.warn(clusterFormationStateSupplier.get().getDescription()); + logger.warn( + "{}; for troubleshooting guidance, see {}", + clusterFormationStateSupplier.get().getDescription(), + ReferenceDocs.DISCOVERY_TROUBLESHOOTING + ); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java index cb3e5ac8edfd..1bb83fc80a3f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java @@ -556,27 +556,18 @@ public interface PersistedState extends Closeable { * marked as committed. */ default void markLastAcceptedStateAsCommitted() { - final ClusterState lastAcceptedState = getLastAcceptedState(); - Metadata.Builder metadataBuilder = null; - if (lastAcceptedState.getLastAcceptedConfiguration().equals(lastAcceptedState.getLastCommittedConfiguration()) == false) { - final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder(lastAcceptedState.coordinationMetadata()) - .lastCommittedConfiguration(lastAcceptedState.getLastAcceptedConfiguration()) - .build(); - metadataBuilder = Metadata.builder(lastAcceptedState.metadata()); - metadataBuilder.coordinationMetadata(coordinationMetadata); - } + final var lastAcceptedState = getLastAcceptedState(); assert lastAcceptedState.metadata().clusterUUID().equals(Metadata.UNKNOWN_CLUSTER_UUID) == false : "received cluster state with empty cluster uuid: " + lastAcceptedState; - if (lastAcceptedState.metadata().clusterUUID().equals(Metadata.UNKNOWN_CLUSTER_UUID) == false - && lastAcceptedState.metadata().clusterUUIDCommitted() == false) { - if (metadataBuilder == null) { - metadataBuilder = Metadata.builder(lastAcceptedState.metadata()); - } - metadataBuilder.clusterUUIDCommitted(true); + + if (lastAcceptedState.metadata().clusterUUIDCommitted() == false) { logger.info("cluster UUID set to [{}]", lastAcceptedState.metadata().clusterUUID()); } - if (metadataBuilder != null) { - setLastAcceptedState(ClusterState.builder(lastAcceptedState).metadata(metadataBuilder).build()); + + final var adjustedMetadata = lastAcceptedState.metadata() + .withLastCommittedValues(true, lastAcceptedState.getLastAcceptedConfiguration()); + if (adjustedMetadata != lastAcceptedState.metadata()) { + setLastAcceptedState(ClusterState.builder(lastAcceptedState).metadata(adjustedMetadata).build()); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 33e3528246d1..b9a0a6950f88 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -137,7 +137,7 @@ 
public class Coordinator extends AbstractLifecycleComponent implements ClusterSt private final AllocationService allocationService; private final JoinHelper joinHelper; private final JoinValidationService joinValidationService; - private final NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor; + private final NodeLeftExecutor nodeLeftExecutor; private final Supplier persistedStateSupplier; private final NoMasterBlockService noMasterBlockService; final Object mutex = new Object(); // package-private to allow tests to call methods that assert that the mutex is held @@ -205,7 +205,7 @@ public Coordinator( this.transportService = transportService; this.masterService = masterService; this.allocationService = allocationService; - this.onJoinValidators = JoinTaskExecutor.addBuiltInJoinValidators(onJoinValidators); + this.onJoinValidators = NodeJoinExecutor.addBuiltInJoinValidators(onJoinValidators); this.singleNodeDiscovery = DiscoveryModule.isSingleNodeDiscovery(settings); this.electionStrategy = electionStrategy; this.joinReasonService = new JoinReasonService(transportService.getThreadPool()::relativeTimeInMillis); @@ -272,7 +272,7 @@ public Coordinator( this::removeNode, nodeHealthService ); - this.nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService); + this.nodeLeftExecutor = new NodeLeftExecutor(allocationService); this.clusterApplier = clusterApplier; masterService.setClusterStateSupplier(this::getStateForMasterService); this.reconfigurator = new Reconfigurator(settings, clusterSettings); @@ -339,16 +339,11 @@ private void onLeaderFailure(Supplier message, Exception e) { private void removeNode(DiscoveryNode discoveryNode, String reason) { synchronized (mutex) { if (mode == Mode.LEADER) { - var task = new NodeRemovalClusterStateTaskExecutor.Task( - discoveryNode, - reason, - () -> joinReasonService.onNodeRemoved(discoveryNode, reason) - ); masterService.submitStateUpdateTask( "node-left", - task, + new NodeLeftExecutor.Task(discoveryNode, reason, () -> joinReasonService.onNodeRemoved(discoveryNode, reason)), ClusterStateTaskConfig.build(Priority.IMMEDIATE), - nodeRemovalExecutor + nodeLeftExecutor ); } } @@ -664,7 +659,7 @@ private void validateJoinRequest(JoinRequest joinRequest, ActionListener v if (stateForJoinValidation.getBlocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) == false) { // We do this in a couple of places including the cluster update thread. This one here is really just best effort to ensure // we fail as fast as possible. 
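As a rough illustration of the rule this early best-effort check enforces (a standalone sketch using plain integers in place of the Elasticsearch Version type, which is an assumption for illustration only): a joining node must be at least as new as the oldest node already in the cluster, otherwise the join is rejected before any heavier validation runs.

import java.util.List;

class VersionBarrierSketch {
    static void ensureVersionBarrier(int joiningNodeVersion, int minClusterNodeVersion) {
        // once every node in the cluster runs the newer version, an older node may no longer join
        if (joiningNodeVersion < minClusterNodeVersion) {
            throw new IllegalStateException(
                "node version [" + joiningNodeVersion + "] may not join a cluster comprising only nodes of version ["
                    + minClusterNodeVersion + "] or greater"
            );
        }
    }

    public static void main(String[] args) {
        int minClusterNodeVersion = List.of(8_06_00, 8_07_00).stream().min(Integer::compare).orElseThrow();
        ensureVersionBarrier(8_07_00, minClusterNodeVersion); // passes
        ensureVersionBarrier(8_05_00, minClusterNodeVersion); // throws IllegalStateException
    }
}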
- JoinTaskExecutor.ensureVersionBarrier( + NodeJoinExecutor.ensureVersionBarrier( joinRequest.getSourceNode().getVersion(), stateForJoinValidation.getNodes().getMinNodeVersion() ); @@ -1002,6 +997,10 @@ protected void doStart() { + votingConfiguration ); } + final Metadata.Builder metadata = Metadata.builder(); + if (lastAcceptedState.metadata().clusterUUIDCommitted()) { + metadata.clusterUUID(lastAcceptedState.metadata().clusterUUID()).clusterUUIDCommitted(true); + } ClusterState initialState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)) .blocks( ClusterBlocks.builder() @@ -1009,6 +1008,7 @@ protected void doStart() { .addGlobalBlock(noMasterBlockService.getNoMasterBlock()) ) .nodes(DiscoveryNodes.builder().add(getLocalNode()).localNodeId(getLocalNode().getId())) + .metadata(metadata) .build(); applierState = initialState; clusterApplier.setInitialState(initialState); @@ -1235,15 +1235,16 @@ ClusterState improveConfiguration(ClusterState clusterState) { ) .map(DiscoveryNode::getId); + DiscoveryNode localNode = getLocalNode(); final Set liveNodes = clusterState.nodes() .stream() .filter(DiscoveryNode::isMasterNode) - .filter(coordinationState.get()::containsJoinVoteFor) + .filter((n) -> coordinationState.get().containsJoinVoteFor(n) || n.equals(localNode)) .collect(Collectors.toSet()); final VotingConfiguration newConfig = reconfigurator.reconfigure( liveNodes, Stream.concat(masterIneligibleNodeIdsInVotingConfig, excludedNodeIds).collect(Collectors.toSet()), - getLocalNode(), + localNode, clusterState.getLastAcceptedConfiguration() ); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java index 6e996bc29a52..82abf4b4c7d5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java @@ -14,7 +14,7 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.store.LockObtainFailedException; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.admin.indices.rollover.Condition; import org.elasticsearch.cli.ProcessInfo; import org.elasticsearch.cli.Terminal; @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.metadata.DataStreamMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.cli.EnvironmentAwareCommand; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -37,6 +38,7 @@ import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.gateway.PersistedClusterStateService; import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -45,6 +47,7 @@ import java.nio.file.Path; import java.util.Arrays; import java.util.EnumSet; +import java.util.Iterator; import java.util.Map; import java.util.Objects; @@ -217,7 +220,7 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { + public TransportVersion getMinimalSupportedVersion() { assert false; throw new UnsupportedOperationException(); } @@ 
-229,8 +232,8 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder.mapContents(contents); + public Iterator toXContentChunked(ToXContent.Params ignored) { + return Iterators.single(((builder, params) -> builder.mapContents(contents))); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java index 48d6ba553cbb..ed5d28d69d79 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java @@ -67,7 +67,7 @@ public class JoinHelper { private final MasterService masterService; private final ClusterApplier clusterApplier; private final TransportService transportService; - private final JoinTaskExecutor joinTaskExecutor; + private final NodeJoinExecutor nodeJoinExecutor; private final LongSupplier currentTermSupplier; private final NodeHealthService nodeHealthService; private final JoinReasonService joinReasonService; @@ -94,7 +94,7 @@ public class JoinHelper { this.clusterApplier = clusterApplier; this.transportService = transportService; this.circuitBreakerService = circuitBreakerService; - this.joinTaskExecutor = new JoinTaskExecutor(allocationService, rerouteService); + this.nodeJoinExecutor = new NodeJoinExecutor(allocationService, rerouteService); this.currentTermSupplier = currentTermSupplier; this.nodeHealthService = nodeHealthService; this.joinReasonService = joinReasonService; @@ -389,13 +389,17 @@ default void close(Mode newMode) {} class LeaderJoinAccumulator implements JoinAccumulator { @Override public void handleJoinRequest(DiscoveryNode sender, ActionListener joinListener) { - final JoinTask task = JoinTask.singleNode( - sender, - joinReasonService.getJoinReason(sender, Mode.LEADER), - joinListener, - currentTermSupplier.getAsLong() + masterService.submitStateUpdateTask( + "node-join", + JoinTask.singleNode( + sender, + joinReasonService.getJoinReason(sender, Mode.LEADER), + joinListener, + currentTermSupplier.getAsLong() + ), + ClusterStateTaskConfig.build(Priority.URGENT), + nodeJoinExecutor ); - masterService.submitStateUpdateTask("node-join", task, ClusterStateTaskConfig.build(Priority.URGENT), joinTaskExecutor); } @Override @@ -461,7 +465,7 @@ public void close(Mode newMode) { "elected-as-master ([" + joinTask.nodeCount() + "] nodes joined)", joinTask, ClusterStateTaskConfig.build(Priority.URGENT), - joinTaskExecutor + nodeJoinExecutor ); } else { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinReason.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinReason.java new file mode 100644 index 000000000000..356a17f1e110 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinReason.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.coordination; + +import org.elasticsearch.common.ReferenceDocs; +import org.elasticsearch.core.Nullable; + +/** + * @param message Message describing the reason for the node joining + * @param guidanceDocs An optional link to troubleshooting guidance docs + */ +public record JoinReason(String message, @Nullable ReferenceDocs guidanceDocs) {} diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinReasonService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinReasonService.java index b4aab2ea57ac..cf5ff1ff44c6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinReasonService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinReasonService.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.MasterService; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.core.Nullable; @@ -107,9 +108,9 @@ public void onNodeRemoved(DiscoveryNode discoveryNode, String reason) { * @param currentMode The current mode of the master that the node is joining. * @return A description of the reason for the join, possibly including some details of its earlier removal. */ - public String getJoinReason(DiscoveryNode discoveryNode, Coordinator.Mode currentMode) { + public JoinReason getJoinReason(DiscoveryNode discoveryNode, Coordinator.Mode currentMode) { return trackedNodes.getOrDefault(discoveryNode.getId(), UNKNOWN_NODE) - .getDescription(relativeTimeInMillisSupplier.getAsLong(), discoveryNode.getEphemeralId(), currentMode); + .getJoinReason(relativeTimeInMillisSupplier.getAsLong(), discoveryNode.getEphemeralId(), currentMode); } /** @@ -134,7 +135,7 @@ private interface TrackedNode { TrackedNode withRemovalReason(String removalReason); - String getDescription(long currentTimeMillis, String joiningNodeEphemeralId, Coordinator.Mode currentMode); + JoinReason getJoinReason(long currentTimeMillis, String joiningNodeEphemeralId, Coordinator.Mode currentMode); long getRemovalAgeMillis(long currentTimeMillis); } @@ -161,11 +162,11 @@ public TrackedNode withRemovalReason(String removalReason) { } @Override - public String getDescription(long currentTimeMillis, String joiningNodeEphemeralId, Coordinator.Mode currentMode) { + public JoinReason getJoinReason(long currentTimeMillis, String joiningNodeEphemeralId, Coordinator.Mode currentMode) { if (currentMode == CANDIDATE) { - return "completing election"; + return COMPLETING_ELECTION; } else { - return "joining"; + return NEW_NODE_JOINING; } } @@ -193,11 +194,11 @@ public TrackedNode withRemovalReason(String removalReason) { } @Override - public String getDescription(long currentTimeMillis, String joiningNodeEphemeralId, Coordinator.Mode currentMode) { + public JoinReason getJoinReason(long currentTimeMillis, String joiningNodeEphemeralId, Coordinator.Mode currentMode) { if (currentMode == CANDIDATE) { - return "completing election"; + return COMPLETING_ELECTION; } else { - return "rejoining"; + return KNOWN_NODE_REJOINING; } } @@ -231,7 +232,7 @@ public TrackedNode withRemovalReason(String removalReason) { } @Override - public String getDescription(long currentTimeMillis, String joiningNodeEphemeralId, Coordinator.Mode currentMode) { + public JoinReason getJoinReason(long 
currentTimeMillis, String joiningNodeEphemeralId, Coordinator.Mode currentMode) { final StringBuilder description = new StringBuilder(); if (currentMode == CANDIDATE) { description.append("completing election"); @@ -261,7 +262,7 @@ public String getDescription(long currentTimeMillis, String joiningNodeEphemeral description.append(", [").append(removalCount).append("] total removals"); } - return description.toString(); + return new JoinReason(description.toString(), isRestarted ? null : ReferenceDocs.UNSTABLE_CLUSTER_TROUBLESHOOTING); } @Override @@ -270,4 +271,7 @@ public long getRemovalAgeMillis(long currentTimeMillis) { } } + private static final JoinReason COMPLETING_ELECTION = new JoinReason("completing election", null); + private static final JoinReason NEW_NODE_JOINING = new JoinReason("joining", null); + private static final JoinReason KNOWN_NODE_REJOINING = new JoinReason("rejoining", null); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTask.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTask.java index 10121337a85c..38f5b48e4d1b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTask.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTask.java @@ -18,7 +18,7 @@ public record JoinTask(List nodeJoinTasks, boolean isBecomingMaster, long term) implements ClusterStateTaskListener { - public static JoinTask singleNode(DiscoveryNode node, String reason, ActionListener listener, long term) { + public static JoinTask singleNode(DiscoveryNode node, JoinReason reason, ActionListener listener, long term) { return new JoinTask(List.of(new NodeJoinTask(node, reason, listener)), false, term); } @@ -59,9 +59,9 @@ public Iterable nodes() { return () -> nodeJoinTasks.stream().map(j -> j.node).iterator(); } - public record NodeJoinTask(DiscoveryNode node, String reason, ActionListener listener) { + public record NodeJoinTask(DiscoveryNode node, JoinReason reason, ActionListener listener) { - public NodeJoinTask(DiscoveryNode node, String reason, ActionListener listener) { + public NodeJoinTask(DiscoveryNode node, JoinReason reason, ActionListener listener) { this.node = Objects.requireNonNull(node); this.reason = reason; this.listener = listener; @@ -76,7 +76,7 @@ public String toString() { public void appendDescription(StringBuilder stringBuilder) { node.appendDescriptionWithoutAttributes(stringBuilder); - stringBuilder.append(' ').append(reason); + stringBuilder.append(' ').append(reason.message()); } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java deleted file mode 100644 index c1c4133dcc41..000000000000 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java +++ /dev/null @@ -1,346 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.cluster.coordination; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateTaskExecutor; -import org.elasticsearch.cluster.NotMasterException; -import org.elasticsearch.cluster.block.ClusterBlocks; -import org.elasticsearch.cluster.metadata.DesiredNodes; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.RerouteService; -import org.elasticsearch.cluster.routing.allocation.AllocationService; -import org.elasticsearch.common.Priority; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.function.BiConsumer; -import java.util.stream.Collectors; - -import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; - -public class JoinTaskExecutor implements ClusterStateTaskExecutor { - - private static final Logger logger = LogManager.getLogger(JoinTaskExecutor.class); - - private final AllocationService allocationService; - private final RerouteService rerouteService; - - public JoinTaskExecutor(AllocationService allocationService, RerouteService rerouteService) { - this.allocationService = allocationService; - this.rerouteService = rerouteService; - } - - @Override - public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { - // The current state that MasterService uses might have been updated by a (different) master in a higher term already. If so, stop - // processing the current cluster state update, there's no point in continuing to compute it as it will later be rejected by - // Coordinator#publish anyhow. 
- assert batchExecutionContext.taskContexts().isEmpty() == false : "Expected to have non empty join tasks list"; - - var term = batchExecutionContext.taskContexts().stream().mapToLong(t -> t.getTask().term()).max().getAsLong(); - - var split = batchExecutionContext.taskContexts().stream().collect(Collectors.partitioningBy(t -> t.getTask().term() == term)); - for (TaskContext outdated : split.get(false)) { - outdated.onFailure( - new NotMasterException("Higher term encountered (encountered: " + term + " > used: " + outdated.getTask().term() + ")") - ); - } - - final var joinTaskContexts = split.get(true); - final var initialState = batchExecutionContext.initialState(); - - if (initialState.term() > term) { - logger.trace("encountered higher term {} than current {}, there is a newer master", initialState.term(), term); - throw new NotMasterException( - "Higher term encountered (current: " + initialState.term() + " > used: " + term + "), there is a newer master" - ); - } - - final boolean isBecomingMaster = joinTaskContexts.stream().anyMatch(t -> t.getTask().isBecomingMaster()); - final DiscoveryNodes currentNodes = initialState.nodes(); - boolean nodesChanged = false; - ClusterState.Builder newState; - - if (currentNodes.getMasterNode() == null && isBecomingMaster) { - assert initialState.term() < term : "there should be at most one become master task per election (= by term)"; - // use these joins to try and become the master. - // Note that we don't have to do any validation of the amount of joining nodes - the commit - // during the cluster state publishing guarantees that we have enough - try (var ignored = batchExecutionContext.dropHeadersContext()) { - // suppress deprecation warnings e.g. from reroute() - newState = becomeMasterAndTrimConflictingNodes(initialState, joinTaskContexts, term); - } - nodesChanged = true; - } else if (currentNodes.isLocalNodeElectedMaster()) { - assert initialState.term() == term : "term should be stable for the same master"; - newState = ClusterState.builder(initialState); - } else { - logger.trace("processing node joins, but we are not the master. 
current master: {}", currentNodes.getMasterNode()); - throw new NotMasterException("Node [" + currentNodes.getLocalNode() + "] not master for join request"); - } - - DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(newState.nodes()); - - assert nodesBuilder.isLocalNodeElectedMaster(); - - Version minClusterNodeVersion = newState.nodes().getMinNodeVersion(); - Version maxClusterNodeVersion = newState.nodes().getMaxNodeVersion(); - // if the cluster is not fully-formed then the min version is not meaningful - final boolean enforceVersionBarrier = initialState.getBlocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) == false; - // processing any joins - Map joinedNodeIdsByNodeName = new HashMap<>(); - for (final var joinTaskContext : joinTaskContexts) { - final var joinTask = joinTaskContext.getTask(); - final List onTaskSuccess = new ArrayList<>(joinTask.nodeCount()); - for (final JoinTask.NodeJoinTask nodeJoinTask : joinTask.nodeJoinTasks()) { - final DiscoveryNode node = nodeJoinTask.node(); - if (currentNodes.nodeExistsWithSameRoles(node)) { - logger.debug("received a join request for an existing node [{}]", node); - } else { - try { - if (enforceVersionBarrier) { - ensureVersionBarrier(node.getVersion(), minClusterNodeVersion); - } - ensureNodesCompatibility(node.getVersion(), minClusterNodeVersion, maxClusterNodeVersion); - // we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices - // we have to reject nodes that don't support all indices we have in this cluster - ensureIndexCompatibility(node.getVersion(), initialState.getMetadata()); - nodesBuilder.add(node); - nodesChanged = true; - minClusterNodeVersion = Version.min(minClusterNodeVersion, node.getVersion()); - maxClusterNodeVersion = Version.max(maxClusterNodeVersion, node.getVersion()); - if (node.isMasterNode()) { - joinedNodeIdsByNodeName.put(node.getName(), node.getId()); - } - } catch (IllegalArgumentException | IllegalStateException e) { - onTaskSuccess.add(() -> nodeJoinTask.listener().onFailure(e)); - continue; - } - } - onTaskSuccess.add(() -> nodeJoinTask.listener().onResponse(null)); - } - joinTaskContext.success(() -> { - for (Runnable joinCompleter : onTaskSuccess) { - joinCompleter.run(); - } - }); - } - - if (nodesChanged) { - rerouteService.reroute( - "post-join reroute", - Priority.HIGH, - ActionListener.wrap(r -> logger.trace("post-join reroute completed"), e -> logger.debug("post-join reroute failed", e)) - ); - - if (joinedNodeIdsByNodeName.isEmpty() == false) { - final var currentVotingConfigExclusions = initialState.getVotingConfigExclusions(); - final var newVotingConfigExclusions = currentVotingConfigExclusions.stream().map(e -> { - // Update nodeId in VotingConfigExclusion when a new node with excluded node name joins - if (CoordinationMetadata.VotingConfigExclusion.MISSING_VALUE_MARKER.equals(e.getNodeId()) - && joinedNodeIdsByNodeName.containsKey(e.getNodeName())) { - return new CoordinationMetadata.VotingConfigExclusion( - joinedNodeIdsByNodeName.get(e.getNodeName()), - e.getNodeName() - ); - } else { - return e; - } - }).collect(Collectors.toSet()); - - // if VotingConfigExclusions did get updated - if (newVotingConfigExclusions.equals(currentVotingConfigExclusions) == false) { - final var coordMetadataBuilder = CoordinationMetadata.builder(initialState.coordinationMetadata()) - .term(term) - .clearVotingConfigExclusions(); - newVotingConfigExclusions.forEach(coordMetadataBuilder::addVotingConfigExclusion); - 
newState.metadata(Metadata.builder(initialState.metadata()).coordinationMetadata(coordMetadataBuilder.build()).build()); - } - } - - final ClusterState clusterStateWithNewNodesAndDesiredNodes = DesiredNodes.updateDesiredNodesStatusIfNeeded( - newState.nodes(nodesBuilder).build() - ); - final ClusterState updatedState = allocationService.adaptAutoExpandReplicas(clusterStateWithNewNodesAndDesiredNodes); - assert enforceVersionBarrier == false - || updatedState.nodes().getMinNodeVersion().onOrAfter(initialState.nodes().getMinNodeVersion()) - : "min node version decreased from [" - + initialState.nodes().getMinNodeVersion() - + "] to [" - + updatedState.nodes().getMinNodeVersion() - + "]"; - return updatedState; - } else { - // we must return a new cluster state instance to force publishing. This is important - // for the joining node to finalize its join and set us as a master - return newState.build(); - } - } - - protected ClusterState.Builder becomeMasterAndTrimConflictingNodes( - ClusterState currentState, - List> taskContexts, - long term - ) { - assert currentState.nodes().getMasterNodeId() == null : currentState; - assert currentState.term() < term : term + " vs " + currentState; - DiscoveryNodes currentNodes = currentState.nodes(); - DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentNodes); - nodesBuilder.masterNodeId(currentState.nodes().getLocalNodeId()); - - for (final var taskContext : taskContexts) { - for (final var joiningNode : taskContext.getTask().nodes()) { - final DiscoveryNode nodeWithSameId = nodesBuilder.get(joiningNode.getId()); - if (nodeWithSameId != null && nodeWithSameId.equals(joiningNode) == false) { - logger.debug("removing existing node [{}], which conflicts with incoming join from [{}]", nodeWithSameId, joiningNode); - nodesBuilder.remove(nodeWithSameId.getId()); - } - final DiscoveryNode nodeWithSameAddress = currentNodes.findByAddress(joiningNode.getAddress()); - if (nodeWithSameAddress != null && nodeWithSameAddress.equals(joiningNode) == false) { - logger.debug( - "removing existing node [{}], which conflicts with incoming join from [{}]", - nodeWithSameAddress, - joiningNode - ); - nodesBuilder.remove(nodeWithSameAddress.getId()); - } - } - } - - // now trim any left over dead nodes - either left there when the previous master stepped down - // or removed by us above - ClusterState tmpState = ClusterState.builder(currentState) - .nodes(nodesBuilder) - .blocks(ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_ID)) - .metadata( - Metadata.builder(currentState.metadata()) - .coordinationMetadata(CoordinationMetadata.builder(currentState.coordinationMetadata()).term(term).build()) - .build() - ) - .build(); - logger.trace("becomeMasterAndTrimConflictingNodes: {}", tmpState.nodes()); - allocationService.cleanCaches(); - tmpState = PersistentTasksCustomMetadata.disassociateDeadNodes(tmpState); - return ClusterState.builder(allocationService.disassociateDeadNodes(tmpState, false, "removed dead nodes on election")); - } - - @Override - public boolean runOnlyOnMaster() { - // we validate that we are allowed to change the cluster state during cluster state processing - return false; - } - - /** - * Ensures that all indices are compatible with the given node version. This will ensure that all indices in the given metadata - * will not be created with a newer version of elasticsearch as well as that all indices are newer or equal to the minimum index - * compatibility version. 
- * @see Version#minimumIndexCompatibilityVersion() - * @throws IllegalStateException if any index is incompatible with the given version - */ - public static void ensureIndexCompatibility(final Version nodeVersion, Metadata metadata) { - Version supportedIndexVersion = nodeVersion.minimumIndexCompatibilityVersion(); - // we ensure that all indices in the cluster we join are compatible with us no matter if they are - // closed or not we can't read mappings of these indices so we need to reject the join... - for (IndexMetadata idxMetadata : metadata) { - if (idxMetadata.getCompatibilityVersion().after(nodeVersion)) { - throw new IllegalStateException( - "index " - + idxMetadata.getIndex() - + " version not supported: " - + idxMetadata.getCompatibilityVersion() - + " the node version is: " - + nodeVersion - ); - } - if (idxMetadata.getCompatibilityVersion().before(supportedIndexVersion)) { - throw new IllegalStateException( - "index " - + idxMetadata.getIndex() - + " version not supported: " - + idxMetadata.getCompatibilityVersion() - + " minimum compatible index version is: " - + supportedIndexVersion - ); - } - } - } - - /** ensures that the joining node has a version that's compatible with all current nodes*/ - public static void ensureNodesCompatibility(final Version joiningNodeVersion, DiscoveryNodes currentNodes) { - final Version minNodeVersion = currentNodes.getMinNodeVersion(); - final Version maxNodeVersion = currentNodes.getMaxNodeVersion(); - ensureNodesCompatibility(joiningNodeVersion, minNodeVersion, maxNodeVersion); - } - - /** ensures that the joining node has a version that's compatible with a given version range */ - public static void ensureNodesCompatibility(Version joiningNodeVersion, Version minClusterNodeVersion, Version maxClusterNodeVersion) { - assert minClusterNodeVersion.onOrBefore(maxClusterNodeVersion) : minClusterNodeVersion + " > " + maxClusterNodeVersion; - if (joiningNodeVersion.isCompatible(maxClusterNodeVersion) == false) { - throw new IllegalStateException( - "node version [" - + joiningNodeVersion - + "] is not supported. " - + "The cluster contains nodes with version [" - + maxClusterNodeVersion - + "], which is incompatible." - ); - } - if (joiningNodeVersion.isCompatible(minClusterNodeVersion) == false) { - throw new IllegalStateException( - "node version [" - + joiningNodeVersion - + "] is not supported." - + "The cluster contains nodes with version [" - + minClusterNodeVersion - + "], which is incompatible." - ); - } - } - - /** - * ensures that the joining node's version is equal or higher to the minClusterNodeVersion. 
This is needed - * to ensure that if the master is already fully operating under the new version, it doesn't go back to mixed - * version mode - **/ - public static void ensureVersionBarrier(Version joiningNodeVersion, Version minClusterNodeVersion) { - if (joiningNodeVersion.before(minClusterNodeVersion)) { - throw new IllegalStateException( - "node version [" - + joiningNodeVersion - + "] may not join a cluster comprising only nodes of version [" - + minClusterNodeVersion - + "] or greater" - ); - } - } - - public static Collection> addBuiltInJoinValidators( - Collection> onJoinValidators - ) { - final Collection> validators = new ArrayList<>(); - validators.add((node, state) -> { - ensureNodesCompatibility(node.getVersion(), state.getNodes()); - ensureIndexCompatibility(node.getVersion(), state.getMetadata()); - }); - validators.addAll(onJoinValidators); - return Collections.unmodifiableCollection(validators); - } -} diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java index dee938d5e0bb..ecadc771ebda 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java @@ -308,12 +308,13 @@ protected void doRun() throws Exception { transportService.sendRequest( discoveryNode, JOIN_VALIDATE_ACTION_NAME, - new BytesTransportRequest(bytes, discoveryNode.getVersion()), + new BytesTransportRequest(bytes, discoveryNode.getVersion().transportVersion), REQUEST_OPTIONS, - new ActionListenerResponseHandler<>( - ActionListener.runAfter(listener, bytes::decRef), + new CleanableResponseHandler<>( + listener, in -> TransportResponse.Empty.INSTANCE, - ThreadPool.Names.CLUSTER_COORDINATION + ThreadPool.Names.CLUSTER_COORDINATION, + bytes::decRef ) ); if (cachedBytes == null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/LagDetector.java b/server/src/main/java/org/elasticsearch/cluster/coordination/LagDetector.java index 6748fa9980d1..5e21f7c69341 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/LagDetector.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/LagDetector.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.cluster.coordination; +import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; @@ -15,8 +16,13 @@ import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.ReferenceDocs; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.ChunkedLoggingStream; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.PrioritizedThrottledTaskRunner; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -25,6 +31,8 @@ import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.transport.TransportService; +import java.io.OutputStreamWriter; +import java.nio.charset.StandardCharsets; import 
java.util.Collections; import java.util.HashSet; import java.util.List; @@ -196,11 +204,13 @@ static class HotThreadsLoggingLagListener implements LagListener { private final TransportService transportService; private final Client client; private final LagListener delegate; + private final PrioritizedThrottledTaskRunner loggingTaskRunner; HotThreadsLoggingLagListener(TransportService transportService, Client client, LagListener delegate) { this.transportService = transportService; this.client = client; this.delegate = delegate; + this.loggingTaskRunner = new PrioritizedThrottledTaskRunner<>("hot_threads", 1, transportService.getThreadPool().generic()); } @Override @@ -224,12 +234,13 @@ public void onResponse(NodesHotThreadsResponse nodesHotThreadsResponse) { return; } - logger.debug( - "hot threads from node [{}] lagging at version [{}] despite commit of cluster state version [{}]:\n{}", - discoveryNode.descriptionWithoutAttributes(), - appliedVersion, - expectedVersion, - nodesHotThreadsResponse.getNodes().get(0).getHotThreads() + loggingTaskRunner.enqueueTask( + new HotThreadsLoggingTask( + discoveryNode, + appliedVersion, + expectedVersion, + nodesHotThreadsResponse.getNodes().get(0).getHotThreads() + ) ); } @@ -281,4 +292,42 @@ public void onFailure(Exception e) { } } + static class HotThreadsLoggingTask extends AbstractRunnable implements Comparable { + + private final String nodeHotThreads; + private final String prefix; + + HotThreadsLoggingTask(DiscoveryNode discoveryNode, long appliedVersion, long expectedVersion, String nodeHotThreads) { + this.nodeHotThreads = nodeHotThreads; + this.prefix = Strings.format( + "hot threads from node [%s] lagging at version [%d] despite commit of cluster state version [%d]", + discoveryNode.descriptionWithoutAttributes(), + appliedVersion, + expectedVersion + ); + } + + @Override + public void onFailure(Exception e) { + logger.error(Strings.format("unexpected exception reporting %s", prefix), e); + } + + @Override + protected void doRun() throws Exception { + try ( + var writer = new OutputStreamWriter( + ChunkedLoggingStream.create(logger, Level.DEBUG, prefix, ReferenceDocs.LAGGING_NODE_TROUBLESHOOTING), + StandardCharsets.UTF_8 + ) + ) { + writer.write(nodeHotThreads); + } + } + + @Override + public int compareTo(HotThreadsLoggingTask o) { + return 0; + } + } + } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java new file mode 100644 index 000000000000..8a63fe027696 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java @@ -0,0 +1,363 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ +package org.elasticsearch.cluster.coordination; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; +import org.elasticsearch.cluster.NotMasterException; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.DesiredNodes; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.RerouteService; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.common.Priority; +import org.elasticsearch.persistent.PersistentTasksCustomMetadata; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.BiConsumer; +import java.util.stream.Collectors; + +import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; + +public class NodeJoinExecutor implements ClusterStateTaskExecutor { + + private static final Logger logger = LogManager.getLogger(NodeJoinExecutor.class); + + private final AllocationService allocationService; + private final RerouteService rerouteService; + + public NodeJoinExecutor(AllocationService allocationService, RerouteService rerouteService) { + this.allocationService = allocationService; + this.rerouteService = rerouteService; + } + + @Override + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { + // The current state that MasterService uses might have been updated by a (different) master in a higher term already. If so, stop + // processing the current cluster state update, there's no point in continuing to compute it as it will later be rejected by + // Coordinator#publish anyhow. 
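A standalone sketch of the term check this comment describes, using a bare record in place of the real task contexts (the record and node names are assumptions for illustration): join tasks batched at an older term are failed up front as stale, and only the tasks at the highest term in the batch are applied to the cluster state.

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

class TermPartitionSketch {
    record JoinTaskSketch(String node, long term) {}

    public static void main(String[] args) {
        List<JoinTaskSketch> tasks = List.of(
            new JoinTaskSketch("node-a", 4),
            new JoinTaskSketch("node-b", 5),
            new JoinTaskSketch("node-c", 5)
        );
        // the highest term wins; anything below it was issued by a now-superseded master
        long term = tasks.stream().mapToLong(JoinTaskSketch::term).max().getAsLong();
        Map<Boolean, List<JoinTaskSketch>> split = tasks.stream()
            .collect(Collectors.partitioningBy(t -> t.term() == term));
        System.out.println("failed as stale (lower term): " + split.get(false));
        System.out.println("processed at term " + term + ": " + split.get(true));
    }
}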
+ assert batchExecutionContext.taskContexts().isEmpty() == false : "Expected to have non empty join tasks list"; + + var term = batchExecutionContext.taskContexts().stream().mapToLong(t -> t.getTask().term()).max().getAsLong(); + + var split = batchExecutionContext.taskContexts().stream().collect(Collectors.partitioningBy(t -> t.getTask().term() == term)); + for (TaskContext outdated : split.get(false)) { + outdated.onFailure( + new NotMasterException("Higher term encountered (encountered: " + term + " > used: " + outdated.getTask().term() + ")") + ); + } + + final var joinTaskContexts = split.get(true); + final var initialState = batchExecutionContext.initialState(); + + if (initialState.term() > term) { + logger.trace("encountered higher term {} than current {}, there is a newer master", initialState.term(), term); + throw new NotMasterException( + "Higher term encountered (current: " + initialState.term() + " > used: " + term + "), there is a newer master" + ); + } + + final boolean isBecomingMaster = joinTaskContexts.stream().anyMatch(t -> t.getTask().isBecomingMaster()); + final DiscoveryNodes currentNodes = initialState.nodes(); + boolean nodesChanged = false; + ClusterState.Builder newState; + + if (currentNodes.getMasterNode() == null && isBecomingMaster) { + assert initialState.term() < term : "there should be at most one become master task per election (= by term)"; + // use these joins to try and become the master. + // Note that we don't have to do any validation of the amount of joining nodes - the commit + // during the cluster state publishing guarantees that we have enough + try (var ignored = batchExecutionContext.dropHeadersContext()) { + // suppress deprecation warnings e.g. from reroute() + newState = becomeMasterAndTrimConflictingNodes(initialState, joinTaskContexts, term); + } + nodesChanged = true; + } else if (currentNodes.isLocalNodeElectedMaster()) { + assert initialState.term() == term : "term should be stable for the same master"; + newState = ClusterState.builder(initialState); + } else { + logger.trace("processing node joins, but we are not the master. 
current master: {}", currentNodes.getMasterNode()); + throw new NotMasterException("Node [" + currentNodes.getLocalNode() + "] not master for join request"); + } + + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(newState.nodes()); + + assert nodesBuilder.isLocalNodeElectedMaster(); + + Version minClusterNodeVersion = newState.nodes().getMinNodeVersion(); + Version maxClusterNodeVersion = newState.nodes().getMaxNodeVersion(); + // if the cluster is not fully-formed then the min version is not meaningful + final boolean enforceVersionBarrier = initialState.getBlocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) == false; + // processing any joins + Map joinedNodeIdsByNodeName = new HashMap<>(); + for (final var joinTaskContext : joinTaskContexts) { + final var joinTask = joinTaskContext.getTask(); + final List onTaskSuccess = new ArrayList<>(joinTask.nodeCount()); + for (final JoinTask.NodeJoinTask nodeJoinTask : joinTask.nodeJoinTasks()) { + final DiscoveryNode node = nodeJoinTask.node(); + if (currentNodes.nodeExistsWithSameRoles(node)) { + logger.debug("received a join request for an existing node [{}]", node); + } else { + try { + if (enforceVersionBarrier) { + ensureVersionBarrier(node.getVersion(), minClusterNodeVersion); + } + ensureNodesCompatibility(node.getVersion(), minClusterNodeVersion, maxClusterNodeVersion); + // we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices + // we have to reject nodes that don't support all indices we have in this cluster + ensureIndexCompatibility(node.getVersion(), initialState.getMetadata()); + nodesBuilder.add(node); + nodesChanged = true; + minClusterNodeVersion = Version.min(minClusterNodeVersion, node.getVersion()); + maxClusterNodeVersion = Version.max(maxClusterNodeVersion, node.getVersion()); + if (node.isMasterNode()) { + joinedNodeIdsByNodeName.put(node.getName(), node.getId()); + } + } catch (IllegalArgumentException | IllegalStateException e) { + onTaskSuccess.add(() -> nodeJoinTask.listener().onFailure(e)); + continue; + } + } + onTaskSuccess.add(() -> { + final var reason = nodeJoinTask.reason(); + if (reason.guidanceDocs() == null) { + logger.info( + "node-join: [{}] with reason [{}]", + nodeJoinTask.node().descriptionWithoutAttributes(), + reason.message() + ); + } else { + logger.warn( + "node-join: [{}] with reason [{}]; for troubleshooting guidance, see {}", + nodeJoinTask.node().descriptionWithoutAttributes(), + reason.message(), + reason.guidanceDocs() + ); + } + nodeJoinTask.listener().onResponse(null); + }); + } + joinTaskContext.success(() -> { + for (Runnable joinCompleter : onTaskSuccess) { + joinCompleter.run(); + } + }); + } + + if (nodesChanged) { + rerouteService.reroute( + "post-join reroute", + Priority.HIGH, + ActionListener.wrap(r -> logger.trace("post-join reroute completed"), e -> logger.debug("post-join reroute failed", e)) + ); + + if (joinedNodeIdsByNodeName.isEmpty() == false) { + final var currentVotingConfigExclusions = initialState.getVotingConfigExclusions(); + final var newVotingConfigExclusions = currentVotingConfigExclusions.stream().map(e -> { + // Update nodeId in VotingConfigExclusion when a new node with excluded node name joins + if (CoordinationMetadata.VotingConfigExclusion.MISSING_VALUE_MARKER.equals(e.getNodeId()) + && joinedNodeIdsByNodeName.containsKey(e.getNodeName())) { + return new CoordinationMetadata.VotingConfigExclusion( + joinedNodeIdsByNodeName.get(e.getNodeName()), + e.getNodeName() + ); + } else { + return e; 
+ } + }).collect(Collectors.toSet()); + + // if VotingConfigExclusions did get updated + if (newVotingConfigExclusions.equals(currentVotingConfigExclusions) == false) { + final var coordMetadataBuilder = CoordinationMetadata.builder(initialState.coordinationMetadata()) + .term(term) + .clearVotingConfigExclusions(); + newVotingConfigExclusions.forEach(coordMetadataBuilder::addVotingConfigExclusion); + newState.metadata(Metadata.builder(initialState.metadata()).coordinationMetadata(coordMetadataBuilder.build()).build()); + } + } + + final ClusterState clusterStateWithNewNodesAndDesiredNodes = DesiredNodes.updateDesiredNodesStatusIfNeeded( + newState.nodes(nodesBuilder).build() + ); + final ClusterState updatedState = allocationService.adaptAutoExpandReplicas(clusterStateWithNewNodesAndDesiredNodes); + assert enforceVersionBarrier == false + || updatedState.nodes().getMinNodeVersion().onOrAfter(initialState.nodes().getMinNodeVersion()) + : "min node version decreased from [" + + initialState.nodes().getMinNodeVersion() + + "] to [" + + updatedState.nodes().getMinNodeVersion() + + "]"; + return updatedState; + } else { + // we must return a new cluster state instance to force publishing. This is important + // for the joining node to finalize its join and set us as a master + return newState.build(); + } + } + + protected ClusterState.Builder becomeMasterAndTrimConflictingNodes( + ClusterState currentState, + List> taskContexts, + long term + ) { + assert currentState.nodes().getMasterNodeId() == null : currentState; + assert currentState.term() < term : term + " vs " + currentState; + DiscoveryNodes currentNodes = currentState.nodes(); + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentNodes); + nodesBuilder.masterNodeId(currentState.nodes().getLocalNodeId()); + + for (final var taskContext : taskContexts) { + for (final var joiningNode : taskContext.getTask().nodes()) { + final DiscoveryNode nodeWithSameId = nodesBuilder.get(joiningNode.getId()); + if (nodeWithSameId != null && nodeWithSameId.equals(joiningNode) == false) { + logger.debug("removing existing node [{}], which conflicts with incoming join from [{}]", nodeWithSameId, joiningNode); + nodesBuilder.remove(nodeWithSameId.getId()); + } + final DiscoveryNode nodeWithSameAddress = currentNodes.findByAddress(joiningNode.getAddress()); + if (nodeWithSameAddress != null && nodeWithSameAddress.equals(joiningNode) == false) { + logger.debug( + "removing existing node [{}], which conflicts with incoming join from [{}]", + nodeWithSameAddress, + joiningNode + ); + nodesBuilder.remove(nodeWithSameAddress.getId()); + } + } + } + + // now trim any left over dead nodes - either left there when the previous master stepped down + // or removed by us above + ClusterState tmpState = ClusterState.builder(currentState) + .nodes(nodesBuilder) + .blocks(ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_ID)) + .metadata( + Metadata.builder(currentState.metadata()) + .coordinationMetadata(CoordinationMetadata.builder(currentState.coordinationMetadata()).term(term).build()) + .build() + ) + .build(); + logger.trace("becomeMasterAndTrimConflictingNodes: {}", tmpState.nodes()); + allocationService.cleanCaches(); + tmpState = PersistentTasksCustomMetadata.disassociateDeadNodes(tmpState); + return ClusterState.builder(allocationService.disassociateDeadNodes(tmpState, false, "removed dead nodes on election")); + } + + @Override + public boolean runOnlyOnMaster() { + // we validate 
that we are allowed to change the cluster state during cluster state processing + return false; + } + + /** + * Ensures that all indices are compatible with the given node version. This will ensure that all indices in the given metadata + * will not be created with a newer version of elasticsearch as well as that all indices are newer or equal to the minimum index + * compatibility version. + * @see Version#minimumIndexCompatibilityVersion() + * @throws IllegalStateException if any index is incompatible with the given version + */ + public static void ensureIndexCompatibility(final Version nodeVersion, Metadata metadata) { + Version supportedIndexVersion = nodeVersion.minimumIndexCompatibilityVersion(); + // we ensure that all indices in the cluster we join are compatible with us no matter if they are + // closed or not we can't read mappings of these indices so we need to reject the join... + for (IndexMetadata idxMetadata : metadata) { + if (idxMetadata.getCompatibilityVersion().after(nodeVersion)) { + throw new IllegalStateException( + "index " + + idxMetadata.getIndex() + + " version not supported: " + + idxMetadata.getCompatibilityVersion() + + " the node version is: " + + nodeVersion + ); + } + if (idxMetadata.getCompatibilityVersion().before(supportedIndexVersion)) { + throw new IllegalStateException( + "index " + + idxMetadata.getIndex() + + " version not supported: " + + idxMetadata.getCompatibilityVersion() + + " minimum compatible index version is: " + + supportedIndexVersion + ); + } + } + } + + /** ensures that the joining node has a version that's compatible with all current nodes*/ + public static void ensureNodesCompatibility(final Version joiningNodeVersion, DiscoveryNodes currentNodes) { + final Version minNodeVersion = currentNodes.getMinNodeVersion(); + final Version maxNodeVersion = currentNodes.getMaxNodeVersion(); + ensureNodesCompatibility(joiningNodeVersion, minNodeVersion, maxNodeVersion); + } + + /** ensures that the joining node has a version that's compatible with a given version range */ + public static void ensureNodesCompatibility(Version joiningNodeVersion, Version minClusterNodeVersion, Version maxClusterNodeVersion) { + assert minClusterNodeVersion.onOrBefore(maxClusterNodeVersion) : minClusterNodeVersion + " > " + maxClusterNodeVersion; + if (joiningNodeVersion.isCompatible(maxClusterNodeVersion) == false) { + throw new IllegalStateException( + "node version [" + + joiningNodeVersion + + "] is not supported. " + + "The cluster contains nodes with version [" + + maxClusterNodeVersion + + "], which is incompatible." + ); + } + if (joiningNodeVersion.isCompatible(minClusterNodeVersion) == false) { + throw new IllegalStateException( + "node version [" + + joiningNodeVersion + + "] is not supported." + + "The cluster contains nodes with version [" + + minClusterNodeVersion + + "], which is incompatible." + ); + } + } + + /** + * ensures that the joining node's version is equal or higher to the minClusterNodeVersion. 
This is needed + * to ensure that if the master is already fully operating under the new version, it doesn't go back to mixed + * version mode + **/ + public static void ensureVersionBarrier(Version joiningNodeVersion, Version minClusterNodeVersion) { + if (joiningNodeVersion.before(minClusterNodeVersion)) { + throw new IllegalStateException( + "node version [" + + joiningNodeVersion + + "] may not join a cluster comprising only nodes of version [" + + minClusterNodeVersion + + "] or greater" + ); + } + } + + public static Collection> addBuiltInJoinValidators( + Collection> onJoinValidators + ) { + final Collection> validators = new ArrayList<>(); + validators.add((node, state) -> { + ensureNodesCompatibility(node.getVersion(), state.getNodes()); + ensureIndexCompatibility(node.getVersion(), state.getMetadata()); + }); + validators.addAll(onJoinValidators); + return Collections.unmodifiableCollection(validators); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java new file mode 100644 index 000000000000..aad175c97098 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.cluster.coordination; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; +import org.elasticsearch.cluster.ClusterStateTaskListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.service.MasterService; +import org.elasticsearch.persistent.PersistentTasksCustomMetadata; + +public class NodeLeftExecutor implements ClusterStateTaskExecutor { + + private static final Logger logger = LogManager.getLogger(NodeLeftExecutor.class); + + private final AllocationService allocationService; + + public record Task(DiscoveryNode node, String reason, Runnable onClusterStateProcessed) implements ClusterStateTaskListener { + + @Override + public void onFailure(final Exception e) { + logger.log(MasterService.isPublishFailureException(e) ? 
Level.DEBUG : Level.ERROR, "unexpected failure during [node-left]", e); + } + + @Override + public String toString() { + final StringBuilder stringBuilder = new StringBuilder(); + node.appendDescriptionWithoutAttributes(stringBuilder); + stringBuilder.append(" reason: ").append(reason); + return stringBuilder.toString(); + } + } + + public NodeLeftExecutor(AllocationService allocationService) { + this.allocationService = allocationService; + } + + @Override + public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { + final ClusterState initialState = batchExecutionContext.initialState(); + final DiscoveryNodes.Builder remainingNodesBuilder = DiscoveryNodes.builder(initialState.nodes()); + boolean removed = false; + for (final var taskContext : batchExecutionContext.taskContexts()) { + final var task = taskContext.getTask(); + final String reason; + if (initialState.nodes().nodeExists(task.node())) { + remainingNodesBuilder.remove(task.node()); + removed = true; + reason = task.reason(); + } else { + logger.debug("node [{}] does not exist in cluster state, ignoring", task); + reason = null; + } + taskContext.success(() -> { + if (reason != null) { + logger.info("node-left: [{}] with reason [{}]", task.node().descriptionWithoutAttributes(), reason); + } + task.onClusterStateProcessed.run(); + }); + } + + if (removed == false) { + // no nodes to remove, keep the current cluster state + return initialState; + } + + try (var ignored = batchExecutionContext.dropHeadersContext()) { + // suppress deprecation warnings e.g. from reroute() + + final var remainingNodesClusterState = remainingNodesClusterState(initialState, remainingNodesBuilder); + final var ptasksDisassociatedState = PersistentTasksCustomMetadata.disassociateDeadNodes(remainingNodesClusterState); + return allocationService.disassociateDeadNodes( + ptasksDisassociatedState, + true, + describeTasks(batchExecutionContext.taskContexts().stream().map(TaskContext::getTask).toList()) + ); + } + } + + // visible for testing + // hook is used in testing to ensure that correct cluster state is used to test whether a + // rejoin or reroute is needed + protected ClusterState remainingNodesClusterState(final ClusterState currentState, DiscoveryNodes.Builder remainingNodesBuilder) { + return ClusterState.builder(currentState).nodes(remainingNodesBuilder).build(); + } + +} diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java deleted file mode 100644 index 7cf929d6da87..000000000000 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.cluster.coordination; - -import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateTaskExecutor; -import org.elasticsearch.cluster.ClusterStateTaskListener; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.allocation.AllocationService; -import org.elasticsearch.cluster.service.MasterService; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; - -public class NodeRemovalClusterStateTaskExecutor implements ClusterStateTaskExecutor { - - private static final Logger logger = LogManager.getLogger(NodeRemovalClusterStateTaskExecutor.class); - - private final AllocationService allocationService; - - public record Task(DiscoveryNode node, String reason, Runnable onClusterStateProcessed) implements ClusterStateTaskListener { - - @Override - public void onFailure(final Exception e) { - logger.log(MasterService.isPublishFailureException(e) ? Level.DEBUG : Level.ERROR, "unexpected failure during [node-left]", e); - } - - @Override - public String toString() { - final StringBuilder stringBuilder = new StringBuilder(); - node.appendDescriptionWithoutAttributes(stringBuilder); - stringBuilder.append(" reason: ").append(reason); - return stringBuilder.toString(); - } - } - - public NodeRemovalClusterStateTaskExecutor(AllocationService allocationService) { - this.allocationService = allocationService; - } - - @Override - public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { - final ClusterState initialState = batchExecutionContext.initialState(); - final DiscoveryNodes.Builder remainingNodesBuilder = DiscoveryNodes.builder(initialState.nodes()); - boolean removed = false; - for (final var taskContext : batchExecutionContext.taskContexts()) { - final var task = taskContext.getTask(); - if (initialState.nodes().nodeExists(task.node())) { - remainingNodesBuilder.remove(task.node()); - removed = true; - } else { - logger.debug("node [{}] does not exist in cluster state, ignoring", task); - } - taskContext.success(task.onClusterStateProcessed::run); - } - - if (removed == false) { - // no nodes to remove, keep the current cluster state - return initialState; - } - - try (var ignored = batchExecutionContext.dropHeadersContext()) { - // suppress deprecation warnings e.g. 
from reroute() - - final var remainingNodesClusterState = remainingNodesClusterState(initialState, remainingNodesBuilder); - final var ptasksDisassociatedState = PersistentTasksCustomMetadata.disassociateDeadNodes(remainingNodesClusterState); - return allocationService.disassociateDeadNodes( - ptasksDisassociatedState, - true, - describeTasks(batchExecutionContext.taskContexts().stream().map(TaskContext::getTask).toList()) - ); - } - } - - // visible for testing - // hook is used in testing to ensure that correct cluster state is used to test whether a - // rejoin or reroute is needed - protected ClusterState remainingNodesClusterState(final ClusterState currentState, DiscoveryNodes.Builder remainingNodesBuilder) { - return ClusterState.builder(currentState).nodes(remainingNodesBuilder).build(); - } - -} diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java index 5dd308640cc9..4d6b4ce1edd0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java @@ -10,9 +10,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStatePublicationEvent; @@ -86,6 +86,8 @@ public class PublicationTransportHandler { TransportRequestOptions.Type.STATE ); + public static final TransportVersion INCLUDES_LAST_COMMITTED_DATA_VERSION = TransportVersion.V_8_6_0; + private final SerializationStatsTracker serializationStatsTracker = new SerializationStatsTracker(); public PublicationTransportHandler( @@ -124,13 +126,14 @@ private PublishWithJoinResponse handleIncomingPublishRequest(BytesTransportReque in = new InputStreamStreamInput(compressor.threadLocalInputStream(in)); } in = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry); - in.setVersion(request.version()); + in.setTransportVersion(request.version()); // If true we received full cluster state - otherwise diffs if (in.readBoolean()) { final ClusterState incomingState; // Close early to release resources used by the de-compression as early as possible try (StreamInput input = in) { incomingState = ClusterState.readFrom(input, transportService.getLocalNode()); + assert input.read() == -1; } catch (Exception e) { logger.warn("unexpected error while deserializing an incoming cluster state", e); assert false : e; @@ -151,11 +154,30 @@ private PublishWithJoinResponse handleIncomingPublishRequest(BytesTransportReque ClusterState incomingState; try { final Diff diff; + final boolean includesLastCommittedData = request.version().onOrAfter(INCLUDES_LAST_COMMITTED_DATA_VERSION); + final boolean clusterUuidCommitted; + final CoordinationMetadata.VotingConfiguration lastCommittedConfiguration; + // Close stream early to release resources used by the de-compression as early as possible try (StreamInput input = in) { diff = ClusterState.readDiffFrom(input, lastSeen.nodes().getLocalNode()); + if (includesLastCommittedData) { + clusterUuidCommitted = in.readBoolean(); + 
lastCommittedConfiguration = new CoordinationMetadata.VotingConfiguration(in); + } else { + clusterUuidCommitted = false; + lastCommittedConfiguration = null; + } + assert input.read() == -1; } incomingState = diff.apply(lastSeen); // might throw IncompatibleClusterStateVersionException + if (includesLastCommittedData) { + final var adjustedMetadata = incomingState.metadata() + .withLastCommittedValues(clusterUuidCommitted, lastCommittedConfiguration); + if (adjustedMetadata != incomingState.metadata()) { + incomingState = ClusterState.builder(incomingState).metadata(adjustedMetadata).build(); + } + } } catch (IncompatibleClusterStateVersionException e) { incompatibleClusterStateDiffReceivedCount.incrementAndGet(); throw e; @@ -205,7 +227,7 @@ public PublicationContext newPublicationContext(ClusterStatePublicationEvent clu } private ReleasableBytesReference serializeFullClusterState(ClusterState clusterState, DiscoveryNode node) { - final Version nodeVersion = node.getVersion(); + final TransportVersion serializeVersion = node.getVersion().transportVersion; final RecyclerBytesStreamOutput bytesStream = transportService.newNetworkBytesStream(); boolean success = false; try { @@ -215,7 +237,7 @@ private ReleasableBytesReference serializeFullClusterState(ClusterState clusterS CompressorFactory.COMPRESSOR.threadLocalOutputStream(Streams.flushOnCloseStream(bytesStream)) ) ) { - stream.setVersion(nodeVersion); + stream.setTransportVersion(serializeVersion); stream.writeBoolean(true); clusterState.writeTo(stream); uncompressedBytes = stream.position(); @@ -225,9 +247,9 @@ private ReleasableBytesReference serializeFullClusterState(ClusterState clusterS final ReleasableBytesReference result = new ReleasableBytesReference(bytesStream.bytes(), bytesStream); serializationStatsTracker.serializedFullState(uncompressedBytes, result.length()); logger.trace( - "serialized full cluster state version [{}] for node version [{}] with size [{}]", + "serialized full cluster state version [{}] using transport version [{}] with size [{}]", clusterState.version(), - nodeVersion, + serializeVersion, result.length() ); success = true; @@ -239,8 +261,9 @@ private ReleasableBytesReference serializeFullClusterState(ClusterState clusterS } } - private ReleasableBytesReference serializeDiffClusterState(long clusterStateVersion, Diff diff, DiscoveryNode node) { - final Version nodeVersion = node.getVersion(); + private ReleasableBytesReference serializeDiffClusterState(ClusterState newState, Diff diff, DiscoveryNode node) { + final long clusterStateVersion = newState.version(); + final TransportVersion serializeVersion = node.getVersion().transportVersion; final RecyclerBytesStreamOutput bytesStream = transportService.newNetworkBytesStream(); boolean success = false; try { @@ -250,9 +273,13 @@ private ReleasableBytesReference serializeDiffClusterState(long clusterStateVers CompressorFactory.COMPRESSOR.threadLocalOutputStream(Streams.flushOnCloseStream(bytesStream)) ) ) { - stream.setVersion(nodeVersion); + stream.setTransportVersion(serializeVersion); stream.writeBoolean(false); diff.writeTo(stream); + if (serializeVersion.onOrAfter(INCLUDES_LAST_COMMITTED_DATA_VERSION)) { + stream.writeBoolean(newState.metadata().clusterUUIDCommitted()); + newState.getLastCommittedConfiguration().writeTo(stream); + } uncompressedBytes = stream.position(); } catch (IOException e) { throw new ElasticsearchException("failed to serialize cluster state diff for publishing to node {}", e, node); @@ -260,9 +287,9 @@ private 
ReleasableBytesReference serializeDiffClusterState(long clusterStateVers final ReleasableBytesReference result = new ReleasableBytesReference(bytesStream.bytes(), bytesStream); serializationStatsTracker.serializedDiff(uncompressedBytes, result.length()); logger.trace( - "serialized cluster state diff for version [{}] for node version [{}] with size [{}]", + "serialized cluster state diff for version [{}] using transport version [{}] with size [{}]", clusterStateVersion, - nodeVersion, + serializeVersion, result.length() ); success = true; @@ -316,7 +343,7 @@ void buildDiffAndSerializeStates() { } else { serializedDiffs.computeIfAbsent( node.getVersion(), - v -> serializeDiffClusterState(newState.version(), diffSupplier.getOrCompute(), node) + v -> serializeDiffClusterState(newState, diffSupplier.getOrCompute(), node) ); } } @@ -440,14 +467,10 @@ private void sendClusterState( transportService.sendChildRequest( destination, PUBLISH_STATE_ACTION_NAME, - new BytesTransportRequest(bytes, destination.getVersion()), + new BytesTransportRequest(bytes, destination.getVersion().transportVersion), task, STATE_REQUEST_OPTIONS, - new ActionListenerResponseHandler<>( - ActionListener.runAfter(listener, bytes::decRef), - PublishWithJoinResponse::new, - ThreadPool.Names.CLUSTER_COORDINATION - ) + new CleanableResponseHandler<>(listener, PublishWithJoinResponse::new, ThreadPool.Names.CLUSTER_COORDINATION, bytes::decRef) ); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorService.java index 2ed8dca833e5..2fd9b6304e2f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorService.java @@ -41,7 +41,7 @@ public class StableMasterHealthIndicatorService implements HealthIndicatorServic public static final String NAME = "master_is_stable"; public static final String GET_HELP_GUIDE = "https://ela.st/getting-help"; - public static final Diagnosis CONTACT_SUPPORT_USER_ACTION = new Diagnosis( + public static final Diagnosis CONTACT_SUPPORT = new Diagnosis( new Diagnosis.Definition( NAME, "contact_support", @@ -104,7 +104,7 @@ public String name() { } @Override - public HealthIndicatorResult calculate(boolean verbose, HealthInfo healthInfo) { + public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo) { CoordinationDiagnosticsService.CoordinationDiagnosticsResult coordinationDiagnosticsResult = coordinationDiagnosticsService .diagnoseMasterStability(verbose); return getHealthIndicatorResult(coordinationDiagnosticsResult, verbose); @@ -218,7 +218,7 @@ private String getNameForNodeId(String nodeId) { */ private List getContactSupportUserActions(boolean explain) { if (explain) { - return List.of(CONTACT_SUPPORT_USER_ACTION); + return List.of(CONTACT_SUPPORT); } else { return List.of(); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java index c4683aaabbae..a094fbee07e1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/UnsafeBootstrapMasterCommand.java @@ -116,10 +116,12 @@ protected void 
processDataPaths(Terminal terminal, Path[] dataPaths, OptionSet o final ClusterState newClusterState = ClusterState.builder(oldClusterState).metadata(newMetadata).build(); - terminal.println( - Terminal.Verbosity.VERBOSE, - "[old cluster state = " + oldClusterState + ", new cluster state = " + newClusterState + "]" - ); + if (terminal.isPrintable(Terminal.Verbosity.VERBOSE)) { + terminal.println( + Terminal.Verbosity.VERBOSE, + "[old cluster state = " + oldClusterState + ", new cluster state = " + newClusterState + "]" + ); + } confirm(terminal, CONFIRMATION_MSG); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java index 464be6284d21..d2f9f18466aa 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.cluster.coordination; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.bytes.BytesReference; @@ -28,10 +28,10 @@ public class ValidateJoinRequest extends TransportRequest { public ValidateJoinRequest(StreamInput in) throws IOException { super(in); - if (in.getVersion().onOrAfter(Version.V_8_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { // recent versions send a BytesTransportRequest containing a compressed representation of the state final var bytes = in.readReleasableBytesReference(); - final var version = in.getVersion(); + final var version = in.getTransportVersion(); final var namedWriteableRegistry = in.namedWriteableRegistry(); this.stateSupplier = () -> readCompressed(version, bytes, namedWriteableRegistry); this.refCounted = bytes; @@ -43,8 +43,11 @@ public ValidateJoinRequest(StreamInput in) throws IOException { } } - private static ClusterState readCompressed(Version version, BytesReference bytes, NamedWriteableRegistry namedWriteableRegistry) - throws IOException { + private static ClusterState readCompressed( + TransportVersion version, + BytesReference bytes, + NamedWriteableRegistry namedWriteableRegistry + ) throws IOException { try ( var bytesStreamInput = bytes.streamInput(); var in = new NamedWriteableAwareStreamInput( @@ -52,7 +55,7 @@ private static ClusterState readCompressed(Version version, BytesReference bytes namedWriteableRegistry ) ) { - in.setVersion(version); + in.setTransportVersion(version); return ClusterState.readFrom(in, null); } } @@ -64,7 +67,7 @@ public ValidateJoinRequest(ClusterState state) { @Override public void writeTo(StreamOutput out) throws IOException { - assert out.getVersion().before(Version.V_8_3_0); + assert out.getTransportVersion().before(TransportVersion.V_8_3_0); super.writeTo(out); stateSupplier.get().writeTo(out); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplateMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplateMetadata.java index 92fe6f18f9b5..26548f7b824c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplateMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplateMetadata.java @@ -8,21 +8,23 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.Version; +import 
org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.EnumSet; import java.util.HashMap; +import java.util.Iterator; import java.util.Map; import java.util.Objects; @@ -84,8 +86,8 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_7_7_0; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.V_7_7_0; } @Override @@ -98,13 +100,8 @@ public static ComponentTemplateMetadata fromXContent(XContentParser parser) thro } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(COMPONENT_TEMPLATE.getPreferredName()); - for (Map.Entry template : componentTemplates.entrySet()) { - builder.field(template.getKey(), template.getValue(), params); - } - builder.endObject(); - return builder; + public Iterator toXContentChunked(ToXContent.Params ignored) { + return ChunkedToXContentHelper.xContentValuesMap(COMPONENT_TEMPLATE.getPreferredName(), componentTemplates); } @Override @@ -166,8 +163,8 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_7_7_0; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.V_7_7_0; } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java index 2a8ea2dc1b6b..377d91d60a99 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java @@ -8,6 +8,7 @@ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.SimpleDiffable; @@ -46,6 +47,7 @@ public class ComposableIndexTemplate implements SimpleDiffable PARSER = new ConstructingObjectParser<>( @@ -59,7 +61,8 @@ public class ComposableIndexTemplate implements SimpleDiffable) a[5], (DataStreamTemplate) a[6], - (Boolean) a[7] + (Boolean) a[7], + (List) a[8] ) ); @@ -72,6 +75,7 @@ public class ComposableIndexTemplate implements SimpleDiffable p.map(), METADATA); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), DataStreamTemplate.PARSER, DATA_STREAM); PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), ALLOW_AUTO_CREATE); + PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), IGNORE_MISSING_COMPONENT_TEMPLATES); } private final List indexPatterns; @@ -89,6 +93,8 @@ public class ComposableIndexTemplate implements SimpleDiffable ignoreMissingComponentTemplates; static Diff readITV2DiffFrom(StreamInput in) throws IOException { return SimpleDiffable.readDiffFrom(ComposableIndexTemplate::new, in); @@ -106,7 
+112,7 @@ public ComposableIndexTemplate( @Nullable Long version, @Nullable Map metadata ) { - this(indexPatterns, template, componentTemplates, priority, version, metadata, null, null); + this(indexPatterns, template, componentTemplates, priority, version, metadata, null, null, null); } public ComposableIndexTemplate( @@ -118,7 +124,7 @@ public ComposableIndexTemplate( @Nullable Map metadata, @Nullable DataStreamTemplate dataStreamTemplate ) { - this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, null); + this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, null, null); } public ComposableIndexTemplate( @@ -130,6 +136,20 @@ public ComposableIndexTemplate( @Nullable Map metadata, @Nullable DataStreamTemplate dataStreamTemplate, @Nullable Boolean allowAutoCreate + ) { + this(indexPatterns, template, componentTemplates, priority, version, metadata, dataStreamTemplate, allowAutoCreate, null); + } + + public ComposableIndexTemplate( + List indexPatterns, + @Nullable Template template, + @Nullable List componentTemplates, + @Nullable Long priority, + @Nullable Long version, + @Nullable Map metadata, + @Nullable DataStreamTemplate dataStreamTemplate, + @Nullable Boolean allowAutoCreate, + @Nullable List ignoreMissingComponentTemplates ) { this.indexPatterns = indexPatterns; this.template = template; @@ -139,6 +159,7 @@ public ComposableIndexTemplate( this.metadata = metadata; this.dataStreamTemplate = dataStreamTemplate; this.allowAutoCreate = allowAutoCreate; + this.ignoreMissingComponentTemplates = ignoreMissingComponentTemplates; } public ComposableIndexTemplate(StreamInput in) throws IOException { @@ -154,6 +175,11 @@ public ComposableIndexTemplate(StreamInput in) throws IOException { this.metadata = in.readMap(); this.dataStreamTemplate = in.readOptionalWriteable(DataStreamTemplate::new); this.allowAutoCreate = in.readOptionalBoolean(); + if (in.getVersion().onOrAfter(Version.V_8_7_0)) { + this.ignoreMissingComponentTemplates = in.readOptionalStringList(); + } else { + this.ignoreMissingComponentTemplates = null; + } } public List indexPatterns() { @@ -204,6 +230,11 @@ public Boolean getAllowAutoCreate() { return this.allowAutoCreate; } + @Nullable + public List getIgnoreMissingComponentTemplates() { + return ignoreMissingComponentTemplates; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(this.indexPatterns); @@ -219,6 +250,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeGenericMap(this.metadata); out.writeOptionalWriteable(dataStreamTemplate); out.writeOptionalBoolean(allowAutoCreate); + if (out.getVersion().onOrAfter(Version.V_8_7_0)) { + out.writeOptionalStringCollection(ignoreMissingComponentTemplates); + } } @Override @@ -246,6 +280,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (this.allowAutoCreate != null) { builder.field(ALLOW_AUTO_CREATE.getPreferredName(), allowAutoCreate); } + if (this.ignoreMissingComponentTemplates != null) { + builder.stringListField(IGNORE_MISSING_COMPONENT_TEMPLATES.getPreferredName(), ignoreMissingComponentTemplates); + } builder.endObject(); return builder; } @@ -260,7 +297,8 @@ public int hashCode() { this.version, this.metadata, this.dataStreamTemplate, - this.allowAutoCreate + this.allowAutoCreate, + this.ignoreMissingComponentTemplates ); } @@ -280,7 +318,8 @@ && componentTemplatesEquals(this.componentTemplates, other.componentTemplates) &&
Objects.equals(this.version, other.version) && Objects.equals(this.metadata, other.metadata) && Objects.equals(this.dataStreamTemplate, other.dataStreamTemplate) - && Objects.equals(this.allowAutoCreate, other.allowAutoCreate); + && Objects.equals(this.allowAutoCreate, other.allowAutoCreate) + && Objects.equals(this.ignoreMissingComponentTemplates, other.ignoreMissingComponentTemplates); } static boolean componentTemplatesEquals(List c1, List c2) { @@ -331,12 +370,12 @@ public DataStreamTemplate(boolean hidden, boolean allowCustomRouting) { DataStreamTemplate(StreamInput in) throws IOException { hidden = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { allowCustomRouting = in.readBoolean(); } else { allowCustomRouting = false; } - if (in.getVersion().onOrAfter(Version.V_8_1_0) && in.getVersion().before(Version.V_8_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0) && in.getTransportVersion().before(TransportVersion.V_8_3_0)) { // Accidentally included index_mode to binary node to node protocol in previous releases. // (index_mode is removed and was part of the code base when tsdb was behind a feature flag) // (index_mode was behind a feature in the xcontent parser, so it could never actually be used) @@ -379,10 +418,11 @@ public boolean isAllowCustomRouting() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(hidden); - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { out.writeBoolean(allowCustomRouting); } - if (out.getVersion().onOrAfter(Version.V_8_1_0) && out.getVersion().before(Version.V_8_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0) + && out.getTransportVersion().before(TransportVersion.V_8_3_0)) { // See comment in constructor.
out.writeBoolean(false); } @@ -420,6 +460,7 @@ public static class Builder { private Map metadata; private DataStreamTemplate dataStreamTemplate; private Boolean allowAutoCreate; + private List ignoreMissingComponentTemplates; public Builder() {} @@ -463,6 +504,11 @@ public Builder allowAutoCreate(Boolean allowAutoCreate) { return this; } + public Builder ignoreMissingComponentTemplates(List ignoreMissingComponentTemplates) { + this.ignoreMissingComponentTemplates = ignoreMissingComponentTemplates; + return this; + } + public ComposableIndexTemplate build() { return new ComposableIndexTemplate( this.indexPatterns, @@ -472,7 +518,8 @@ public ComposableIndexTemplate build() { this.version, this.metadata, this.dataStreamTemplate, - this.allowAutoCreate + this.allowAutoCreate, + this.ignoreMissingComponentTemplates ); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateMetadata.java index 1b5193510aa6..53c73c7906b9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateMetadata.java @@ -8,21 +8,23 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.EnumSet; import java.util.HashMap; +import java.util.Iterator; import java.util.Map; import java.util.Objects; @@ -89,8 +91,8 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_7_7_0; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.V_7_7_0; } @Override @@ -99,13 +101,8 @@ public void writeTo(StreamOutput out) throws IOException { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(INDEX_TEMPLATE.getPreferredName()); - for (Map.Entry template : indexTemplates.entrySet()) { - builder.field(template.getKey(), template.getValue(), params); - } - builder.endObject(); - return builder; + public Iterator toXContentChunked(ToXContent.Params ignored) { + return ChunkedToXContentHelper.xContentValuesMap(INDEX_TEMPLATE.getPreferredName(), indexTemplates); } @Override @@ -167,8 +164,8 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_7_7_0; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.V_7_7_0; } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index 297219107b13..8e66cecd19c4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -11,7 +11,7 @@ import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.PointValues; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.Strings; @@ -294,10 +294,13 @@ public DataStream rollover(Index writeIndex, long generation, boolean timeSeries */ public DataStream unsafeRollover(Index writeIndex, long generation, boolean timeSeries) { IndexMode indexMode = this.indexMode; - // This allows for migrating a data stream to be a tsdb data stream: - // (only if index_mode=null|standard then allow it to be set to time_series) if ((indexMode == null || indexMode == IndexMode.STANDARD) && timeSeries) { + // This allows for migrating a data stream to be a tsdb data stream: + // (only if index_mode=null|standard then allow it to be set to time_series) indexMode = IndexMode.TIME_SERIES; + } else if (indexMode == IndexMode.TIME_SERIES && timeSeries == false) { + // Allow downgrading a time series data stream to a regular data stream + indexMode = null; } List backingIndices = new ArrayList<>(indices); @@ -526,8 +529,8 @@ public DataStream(StreamInput in) throws IOException { in.readBoolean(), in.readBoolean(), in.readBoolean(), - in.getVersion().onOrAfter(Version.V_8_0_0) ? in.readBoolean() : false, - in.getVersion().onOrAfter(Version.V_8_1_0) ? in.readOptionalEnum(IndexMode.class) : null + in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0) ? in.readBoolean() : false, + in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0) ? in.readOptionalEnum(IndexMode.class) : null ); } @@ -550,10 +553,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(hidden); out.writeBoolean(replicated); out.writeBoolean(system); - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { out.writeBoolean(allowCustomRouting); } - if (out.getVersion().onOrAfter(Version.V_8_1_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { out.writeOptionalEnum(indexMode); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAlias.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAlias.java index cac3e342c1dd..7efba02dfe34 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAlias.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAlias.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.ParsingException; @@ -15,6 +16,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; @@ -25,29 +28,62 @@ import java.io.IOException; import java.io.UncheckedIOException; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; import 
java.util.Set; import java.util.function.Predicate; +import java.util.stream.Collectors; public class DataStreamAlias implements SimpleDiffable, ToXContentFragment { public static final ParseField DATA_STREAMS_FIELD = new ParseField("data_streams"); public static final ParseField WRITE_DATA_STREAM_FIELD = new ParseField("write_data_stream"); - public static final ParseField FILTER_FIELD = new ParseField("filter"); + /* + * Before 8.7.0, we incorrectly only kept one filter for all DataStreams in the DataStreamAlias. This field remains here so that we can + * read old cluster states during an upgrade. We never write XContent with this field as of 8.7.0. + */ + public static final ParseField OLD_FILTER_FIELD = new ParseField("filter"); + public static final ParseField FILTERS_FIELD = new ParseField("filters"); + private static final Logger logger = LogManager.getLogger(DataStreamAlias.class); @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "data_stream_alias", false, - (args, name) -> new DataStreamAlias(name, (List) args[0], (String) args[1], (CompressedXContent) args[2]) + (args, name) -> { + /* + * If we are reading an older cluster state from disk we have to support reading in the single filter that was used before + * 8.7.0. In this case the new dataStreamsToFilters map will be null. So we write a new dataStreamsToFilters using the existing + * filter value for all DataStreams in order to carry forward the previously-existing behavior. + */ + Map dataStreamsToFilters = (Map) args[3]; + CompressedXContent oldFilter = (CompressedXContent) args[2]; + List dataStreamNames = (List) args[0]; + if (dataStreamsToFilters == null && oldFilter != null && dataStreamNames != null) { + logger.info( + "Reading in data stream alias [{}] with a pre-8.7.0-style data stream filter and using it for all data streams in " + + "the data stream alias", + name + ); + dataStreamsToFilters = new HashMap<>(); + for (String dataStreamName : dataStreamNames) { + dataStreamsToFilters.put(dataStreamName, oldFilter); + } + } + if (dataStreamsToFilters == null) { + dataStreamsToFilters = Map.of(); + } + return new DataStreamAlias(name, dataStreamNames, dataStreamsToFilters, (String) args[1]); + } ); static { PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), DATA_STREAMS_FIELD); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), WRITE_DATA_STREAM_FIELD); + // Note: This field is not used in 8.7.0 and higher: PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> { if (p.currentToken() == XContentParser.Token.VALUE_EMBEDDED_OBJECT || p.currentToken() == XContentParser.Token.VALUE_STRING) { return new CompressedXContent(p.binaryValue()); @@ -58,24 +94,55 @@ public class DataStreamAlias implements SimpleDiffable, ToXCont assert false : "unexpected token [" + p.currentToken() + " ]"; return null; } - }, FILTER_FIELD, ObjectParser.ValueType.VALUE_OBJECT_ARRAY); + }, OLD_FILTER_FIELD, ObjectParser.ValueType.VALUE_OBJECT_ARRAY); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.map(HashMap::new, xContentParser -> { + if (p.currentToken() == XContentParser.Token.VALUE_EMBEDDED_OBJECT || p.currentToken() == XContentParser.Token.VALUE_STRING) { + return new CompressedXContent(p.binaryValue()); + } else if (p.currentToken() == XContentParser.Token.START_OBJECT) { + XContentBuilder builder = XContentFactory.jsonBuilder().map(p.mapOrdered()); + return new 
CompressedXContent(BytesReference.bytes(builder)); + } else { + assert false : "unexpected token [" + p.currentToken() + " ]"; + return null; + } + }), FILTERS_FIELD); } private final String name; private final List dataStreams; private final String writeDataStream; - private final CompressedXContent filter; - - private DataStreamAlias(String name, List dataStreams, String writeDataStream, CompressedXContent filter) { + // package-private for testing + final Map dataStreamToFilterMap; + + private DataStreamAlias( + String name, + List dataStreams, + Map dataStreamsToFilters, + String writeDataStream + ) { this.name = Objects.requireNonNull(name); this.dataStreams = List.copyOf(dataStreams); this.writeDataStream = writeDataStream; - this.filter = filter; + this.dataStreamToFilterMap = new HashMap<>(dataStreamsToFilters); assert writeDataStream == null || dataStreams.contains(writeDataStream); } - public DataStreamAlias(String name, List dataStreams, String writeDataStream, Map filter) { - this(name, dataStreams, writeDataStream, compress(filter)); + public DataStreamAlias( + String name, + List dataStreams, + String writeDataStream, + Map> dataStreamsToFilters + ) { + this(name, dataStreams, compressFiltersMap(dataStreamsToFilters), writeDataStream); + } + + private static Map compressFiltersMap(Map> dataStreamToUncompressedFilterMap) { + if (dataStreamToUncompressedFilterMap == null) { + return Map.of(); + } + return dataStreamToUncompressedFilterMap.entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, entry -> compress(entry.getValue()))); } private static CompressedXContent compress(Map filterAsMap) { @@ -100,7 +167,21 @@ public DataStreamAlias(StreamInput in) throws IOException { this.name = in.readString(); this.dataStreams = in.readStringList(); this.writeDataStream = in.readOptionalString(); - this.filter = in.readBoolean() ? CompressedXContent.readCompressedString(in) : null; + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + this.dataStreamToFilterMap = in.readMap(StreamInput::readString, CompressedXContent::readCompressedString); + } else { + this.dataStreamToFilterMap = new HashMap<>(); + CompressedXContent filter = in.readBoolean() ? CompressedXContent.readCompressedString(in) : null; + if (filter != null) { + /* + * Here we're reading in a DataStreamAlias from before 8.7.0, which did not correctly associate filters with DataStreams. + * So we associated the same filter with all DataStreams in the alias to replicate the old behavior. 
+ */ + for (String dataStream : dataStreams) { + dataStreamToFilterMap.put(dataStream, filter); + } + } + } } /** @@ -128,12 +209,12 @@ public String getWriteDataStream() { return writeDataStream; } - public CompressedXContent getFilter() { - return filter; + public CompressedXContent getFilter(String dataStreamName) { + return dataStreamToFilterMap.get(dataStreamName); } public boolean filteringRequired() { - return filter != null; + return dataStreamToFilterMap.isEmpty() == false; } /** @@ -158,23 +239,25 @@ public DataStreamAlias update(String dataStream, Boolean isWriteDataStream, Map< } boolean filterUpdated; - CompressedXContent filter; if (filterAsMap != null) { - filter = compress(filterAsMap); - if (this.filter == null) { + CompressedXContent previousFilter = dataStreamToFilterMap.get(dataStream); + if (previousFilter == null) { filterUpdated = true; } else { - filterUpdated = filterAsMap.equals(decompress(this.filter)) == false; + filterUpdated = filterAsMap.equals(decompress(previousFilter)) == false; } } else { - filter = this.filter; filterUpdated = false; } Set dataStreams = new HashSet<>(this.dataStreams); boolean added = dataStreams.add(dataStream); if (added || Objects.equals(this.writeDataStream, writeDataStream) == false || filterUpdated) { - return new DataStreamAlias(name, List.copyOf(dataStreams), writeDataStream, filter); + Map newDataStreamToFilterMap = new HashMap<>(dataStreamToFilterMap); + if (filterAsMap != null) { + newDataStreamToFilterMap.put(dataStream, compress(filterAsMap)); + } + return new DataStreamAlias(name, List.copyOf(dataStreams), newDataStreamToFilterMap, writeDataStream); } else { return this; } @@ -199,7 +282,7 @@ public DataStreamAlias removeDataStream(String dataStream) { if (dataStream.equals(writeDataStream)) { writeDataStream = null; } - return new DataStreamAlias(name, List.copyOf(dataStreams), writeDataStream, filter); + return new DataStreamAlias(name, List.copyOf(dataStreams), dataStreamToFilterMap, writeDataStream); } } @@ -216,7 +299,7 @@ public DataStreamAlias intersect(Predicate filter) { if (intersectingDataStreams.contains(writeDataStream) == false) { writeDataStream = null; } - return new DataStreamAlias(this.name, intersectingDataStreams, writeDataStream, this.filter); + return new DataStreamAlias(this.name, intersectingDataStreams, this.dataStreamToFilterMap, writeDataStream); } /** @@ -273,7 +356,7 @@ public DataStreamAlias restore(DataStreamAlias previous, String renamePattern, S } } - return new DataStreamAlias(this.name, List.copyOf(mergedDataStreams), writeDataStream, filter); + return new DataStreamAlias(this.name, List.copyOf(mergedDataStreams), dataStreamToFilterMap, writeDataStream); } public static Diff readDiffFrom(StreamInput in) throws IOException { @@ -296,15 +379,17 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (writeDataStream != null) { builder.field(WRITE_DATA_STREAM_FIELD.getPreferredName(), writeDataStream); } - if (filter != null) { - boolean binary = params.paramAsBoolean("binary", false); + boolean binary = params.paramAsBoolean("binary", false); + builder.startObject("filters"); + for (Map.Entry entry : dataStreamToFilterMap.entrySet()) { if (binary) { - builder.field("filter", filter.compressed()); + builder.field(entry.getKey(), entry.getValue().compressed()); } else { - builder.field("filter", XContentHelper.convertToMap(filter.uncompressed(), true).v2()); + builder.field(entry.getKey(), XContentHelper.convertToMap(entry.getValue().uncompressed(), true).v2()); 
} } builder.endObject(); + builder.endObject(); return builder; } @@ -313,11 +398,19 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeStringCollection(dataStreams); out.writeOptionalString(writeDataStream); - if (filter != null) { - out.writeBoolean(true); - filter.writeTo(out); + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + out.writeMap(dataStreamToFilterMap, StreamOutput::writeString, (out1, filter) -> filter.writeTo(out1)); } else { - out.writeBoolean(false); + if (dataStreamToFilterMap.isEmpty()) { + out.writeBoolean(false); + } else { + /* + * TransportVersions before 8.7 incorrectly only allowed a single filter for all datastreams, + * and randomly dropped all others. We replicate that buggy behavior here if we have to write + * to an older node because there is no way to send multiple filters to an older node. + */ + dataStreamToFilterMap.values().iterator().next().writeTo(out); + } } } @@ -329,12 +422,12 @@ public boolean equals(Object o) { return Objects.equals(name, that.name) && Objects.equals(dataStreams, that.dataStreams) && Objects.equals(writeDataStream, that.writeDataStream) - && Objects.equals(filter, that.filter); + && Objects.equals(dataStreamToFilterMap, that.dataStreamToFilterMap); } @Override public int hashCode() { - return Objects.hash(name, dataStreams, writeDataStream, filter); + return Objects.hash(name, dataStreams, writeDataStream, dataStreamToFilterMap); } @Override @@ -348,8 +441,11 @@ public String toString() { + ", writeDataStream='" + writeDataStream + '\'' - + ", filter=" - + (filter != null ? filter.string() : "null") + + ", dataStreamToFilterMap=" + + dataStreamToFilterMap.keySet() .stream() .map(key -> key + "=" + dataStreamToFilterMap.get(key)) .collect(Collectors.joining(", ", "{", "}")) + '}'; } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamMetadata.java index ff05aab210f6..17d2d2d1109c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamMetadata.java @@ -9,18 +9,20 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; @@ -28,6 +30,7 @@ import java.util.ArrayList; import java.util.EnumSet; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -114,7 +117,12 @@ public DataStreamMetadata withAlias(String aliasName, String dataStream, Boolean
DataStreamAlias alias = dataStreamAliases.get(aliasName); if (alias == null) { String writeDataStream = isWriteDataStream != null && isWriteDataStream ? dataStream : null; - alias = new DataStreamAlias(aliasName, List.of(dataStream), writeDataStream, filterAsMap); + alias = new DataStreamAlias( + aliasName, + List.of(dataStream), + writeDataStream, + filterAsMap == null ? null : Map.of(dataStream, filterAsMap) + ); } else { DataStreamAlias copy = alias.update(dataStream, isWriteDataStream, filterAsMap); if (copy == alias) { @@ -207,8 +215,8 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_7_7_0; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.V_7_7_0; } @Override @@ -222,14 +230,13 @@ public static DataStreamMetadata fromXContent(XContentParser parser) throws IOEx } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.xContentValuesMap(DATA_STREAM.getPreferredName(), dataStreams); - builder.startObject(DATA_STREAM_ALIASES.getPreferredName()); - for (Map.Entry dataStream : dataStreamAliases.entrySet()) { - dataStream.getValue().toXContent(builder, params); - } - builder.endObject(); - return builder; + public Iterator toXContentChunked(ToXContent.Params ignored) { + return Iterators.concat( + ChunkedToXContentHelper.xContentValuesMap(DATA_STREAM.getPreferredName(), dataStreams), + ChunkedToXContentHelper.startObject(DATA_STREAM_ALIASES.getPreferredName()), + dataStreamAliases.values().iterator(), + ChunkedToXContentHelper.endObject() + ); } @Override @@ -305,8 +312,8 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_7_7_0; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.V_7_7_0; } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java index 8437ad0efcf3..2c29df2e661c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java @@ -8,6 +8,7 @@ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; @@ -38,7 +39,7 @@ import static org.elasticsearch.node.NodeRoleSettings.NODE_ROLES_SETTING; public final class DesiredNode implements Writeable, ToXContentObject, Comparable { - public static final Version RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION = Version.V_8_3_0; + public static final TransportVersion RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION = TransportVersion.V_8_3_0; private static final ParseField SETTINGS_FIELD = new ParseField("settings"); private static final ParseField PROCESSORS_FIELD = new ParseField("processors"); @@ -173,7 +174,7 @@ public static DesiredNode readFrom(StreamInput in) throws IOException { final var settings = Settings.readSettingsFromStream(in); final Processors processors; final ProcessorsRange processorsRange; - if (in.getVersion().onOrAfter(RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { + if (in.getTransportVersion().onOrAfter(RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { processors = in.readOptionalWriteable(Processors::readFrom); processorsRange = in.readOptionalWriteable(ProcessorsRange::readFrom); } else { @@ -189,7 +190,7 @@ 
public static DesiredNode readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { settings.writeTo(out); - if (out.getVersion().onOrAfter(RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { + if (out.getTransportVersion().onOrAfter(RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { out.writeOptionalWriteable(processors); out.writeOptionalWriteable(processorsRange); } else { @@ -296,7 +297,7 @@ public Set getRoles() { return roles; } - public boolean isCompatibleWithVersion(Version version) { + public boolean isCompatibleWithVersion(TransportVersion version) { if (version.onOrAfter(RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { return true; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java index 13a10213b8ed..036b31fa65be 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java @@ -8,6 +8,7 @@ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -30,7 +31,7 @@ public record DesiredNodeWithStatus(DesiredNode desiredNode, Status status) ToXContentObject, Comparable { - private static final Version STATUS_TRACKING_SUPPORT_VERSION = Version.V_8_4_0; + private static final TransportVersion STATUS_TRACKING_SUPPORT_VERSION = TransportVersion.V_8_4_0; private static final ParseField STATUS_FIELD = new ParseField("status"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( @@ -47,7 +48,7 @@ public record DesiredNodeWithStatus(DesiredNode desiredNode, Status status) ), // An unknown status is expected during upgrades to versions >= STATUS_TRACKING_SUPPORT_VERSION // the desired node status would be populated when a node in the newer version is elected as - // master, the desired nodes status update happens in JoinTaskExecutor. + // master, the desired nodes status update happens in NodeJoinExecutor. args[6] == null ? Status.PENDING : (Status) args[6] ) ); @@ -77,14 +78,14 @@ public String externalId() { public static DesiredNodeWithStatus readFrom(StreamInput in) throws IOException { final var desiredNode = DesiredNode.readFrom(in); final Status status; - if (in.getVersion().onOrAfter(STATUS_TRACKING_SUPPORT_VERSION)) { + if (in.getTransportVersion().onOrAfter(STATUS_TRACKING_SUPPORT_VERSION)) { status = Status.fromValue(in.readShort()); } else { // During upgrades, we consider all desired nodes as PENDING // since it's impossible to know if a node that was supposed to // join the cluster, it joined. The status will be updated // once the master node is upgraded to a version >= STATUS_TRACKING_SUPPORT_VERSION - // in JoinTaskExecutor or when the desired nodes are upgraded to a new version. + // in NodeJoinExecutor or when the desired nodes are upgraded to a new version. 
status = Status.PENDING; } return new DesiredNodeWithStatus(desiredNode, status); @@ -93,7 +94,7 @@ public static DesiredNodeWithStatus readFrom(StreamInput in) throws IOException @Override public void writeTo(StreamOutput out) throws IOException { desiredNode.writeTo(out); - if (out.getVersion().onOrAfter(STATUS_TRACKING_SUPPORT_VERSION)) { + if (out.getTransportVersion().onOrAfter(STATUS_TRACKING_SUPPORT_VERSION)) { out.writeShort(status.value); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodes.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodes.java index b720c90a0673..cadc57c0c2a2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodes.java @@ -8,7 +8,9 @@ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.action.admin.cluster.desirednodes.TransportUpdateDesiredNodesAction; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.coordination.NodeJoinExecutor; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.io.stream.StreamInput; @@ -97,8 +99,7 @@ * * *

- * See {@code JoinTaskExecutor} and {@code TransportUpdateDesiredNodesAction} for more details about - * desired nodes status tracking. + * See {@link NodeJoinExecutor} and {@link TransportUpdateDesiredNodesAction} for more details about desired nodes status tracking. *

* *

diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodesMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodesMetadata.java index 64a384f77e12..f307d060f449 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodesMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodesMetadata.java @@ -8,24 +8,26 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.AbstractNamedDiffable; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NamedDiff; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.EnumSet; +import java.util.Iterator; import java.util.Objects; public class DesiredNodesMetadata extends AbstractNamedDiffable implements Metadata.Custom { - private static final Version MIN_SUPPORTED_VERSION = Version.V_8_1_0; + private static final TransportVersion MIN_SUPPORTED_VERSION = TransportVersion.V_8_1_0; public static final String TYPE = "desired_nodes"; public static final DesiredNodesMetadata EMPTY = new DesiredNodesMetadata((DesiredNodes) null); @@ -67,9 +69,8 @@ public static DesiredNodesMetadata fromXContent(XContentParser parser) throws IO } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(LATEST_FIELD.getPreferredName(), latestDesiredNodes, params); - return builder; + public Iterator toXContentChunked(ToXContent.Params ignored) { + return Iterators.single((builder, params) -> builder.field(LATEST_FIELD.getPreferredName(), latestDesiredNodes, params)); } public static DesiredNodesMetadata fromClusterState(ClusterState clusterState) { @@ -92,7 +93,7 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { + public TransportVersion getMinimalSupportedVersion() { return MIN_SUPPORTED_VERSION; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstractionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstractionResolver.java index 76f9643b0fb4..e9405f061e99 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstractionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstractionResolver.java @@ -14,11 +14,11 @@ import org.elasticsearch.indices.SystemIndices.SystemIndexAccessLevel; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.function.Predicate; +import java.util.function.Supplier; public class IndexAbstractionResolver { @@ -28,30 +28,12 @@ public IndexAbstractionResolver(IndexNameExpressionResolver indexNameExpressionR this.indexNameExpressionResolver = indexNameExpressionResolver; } - public List resolveIndexAbstractions( - String[] indices, - IndicesOptions indicesOptions, - Metadata metadata, - boolean includeDataStreams - ) { - return 
resolveIndexAbstractions(Arrays.asList(indices), indicesOptions, metadata, includeDataStreams); - } - - public List resolveIndexAbstractions( - Iterable indices, - IndicesOptions indicesOptions, - Metadata metadata, - boolean includeDataStreams - ) { - Set availableIndexAbstractions = metadata.getIndicesLookup().keySet(); - return resolveIndexAbstractions(indices, indicesOptions, metadata, availableIndexAbstractions, includeDataStreams); - } - public List resolveIndexAbstractions( Iterable indices, IndicesOptions indicesOptions, Metadata metadata, - Collection availableIndexAbstractions, + Supplier> allAuthorizedAndAvailable, + Predicate isAuthorized, boolean includeDataStreams ) { List finalIndices = new ArrayList<>(); @@ -72,7 +54,7 @@ public List resolveIndexAbstractions( if (indicesOptions.expandWildcardExpressions() && Regex.isSimpleMatchPattern(indexAbstraction)) { wildcardSeen = true; Set resolvedIndices = new HashSet<>(); - for (String authorizedIndex : availableIndexAbstractions) { + for (String authorizedIndex : allAuthorizedAndAvailable.get()) { if (Regex.simpleMatch(indexAbstraction, authorizedIndex) && isIndexVisible( indexAbstraction, @@ -100,7 +82,10 @@ && isIndexVisible( } else { if (minus) { finalIndices.remove(indexAbstraction); - } else if (indicesOptions.ignoreUnavailable() == false || availableIndexAbstractions.contains(indexAbstraction)) { + } else if (indicesOptions.ignoreUnavailable() == false || isAuthorized.test(indexAbstraction)) { + // Unauthorized names are considered unavailable, so if `ignoreUnavailable` is `true` they should be silently + // discarded from the `finalIndices` list. Other "ways of unavailable" must be handled by the action + // handler, see: https://github.com/elastic/elasticsearch/issues/90215 finalIndices.add(indexAbstraction); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java index 7b600d875c79..ec8f659913e8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java @@ -8,7 +8,7 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.common.io.stream.StreamInput; @@ -17,10 +17,12 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.index.Index; import org.elasticsearch.xcontent.ContextParser; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -32,6 +34,7 @@ import java.util.Collection; import java.util.Collections; import java.util.EnumSet; +import java.util.Iterator; import java.util.List; import java.util.Objects; @@ -84,8 +87,8 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT.minimumCompatibilityVersion(); + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.CURRENT.minimumCompatibilityVersion(); } 
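The DesiredNodesMetadata and IndexGraveyard hunks above replace single-shot toXContent(XContentBuilder, Params) implementations with chunked serialization. A minimal sketch of that shape follows; it uses the same helpers these hunks call, but the SampleCustom class and its "entries" field are invented here purely for illustration.

import org.elasticsearch.common.xcontent.ChunkedToXContentHelper;
import org.elasticsearch.xcontent.ToXContent;

import java.util.Iterator;
import java.util.List;

// Illustrative sketch: emit x-content as an iterator of small chunks instead of
// filling one XContentBuilder in a single call.
class SampleCustom {
    private final List<? extends ToXContent> entries;

    SampleCustom(List<? extends ToXContent> entries) {
        this.entries = entries;
    }

    public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) {
        // Wraps the entries in an array field named "entries"; the caller drains the
        // iterator incrementally, which bounds the per-chunk memory footprint.
        return ChunkedToXContentHelper.array("entries", entries.iterator());
    }
}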
@Override @@ -123,12 +126,8 @@ public boolean containsIndex(final Index index) { } @Override - public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { - builder.startArray(TOMBSTONES_FIELD.getPreferredName()); - for (Tombstone tombstone : tombstones) { - tombstone.toXContent(builder, params); - } - return builder.endArray(); + public Iterator toXContentChunked(ToXContent.Params ignored) { + return ChunkedToXContentHelper.array(TOMBSTONES_FIELD.getPreferredName(), tombstones.iterator()); } public static IndexGraveyard fromXContent(final XContentParser parser) throws IOException { @@ -334,8 +333,8 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT.minimumCompatibilityVersion(); + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.CURRENT.minimumCompatibilityVersion(); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index 77ea513099e4..436470b3ebda 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -8,6 +8,9 @@ package org.elasticsearch.cluster.metadata; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; import org.elasticsearch.action.support.ActiveShardCount; @@ -83,6 +86,8 @@ public class IndexMetadata implements Diffable, ToXContentFragment { + private static final Logger logger = LogManager.getLogger(IndexMetadata.class); + public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock( 5, "index read-only (api)", @@ -530,9 +535,9 @@ public Iterator> settings() { public static final String INDEX_STATE_FILE_PREFIX = "state-"; - static final Version SYSTEM_INDEX_FLAG_ADDED = Version.V_7_10_0; + static final TransportVersion SYSTEM_INDEX_FLAG_ADDED = TransportVersion.V_7_10_0; - static final Version STATS_AND_FORECAST_ADDED = Version.V_8_6_0; + static final TransportVersion STATS_AND_FORECAST_ADDED = TransportVersion.V_8_6_0; private final int routingNumShards; private final int routingFactor; @@ -1402,7 +1407,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - private static final Version SETTING_DIFF_VERSION = Version.V_8_5_0; + private static final TransportVersion SETTING_DIFF_VERSION = TransportVersion.V_8_5_0; private static class IndexMetadataDiff implements Diff { @@ -1483,13 +1488,13 @@ private static class IndexMetadataDiff implements Diff { version = in.readLong(); mappingVersion = in.readVLong(); settingsVersion = in.readVLong(); - if (in.getVersion().onOrAfter(Version.V_7_2_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_2_0)) { aliasesVersion = in.readVLong(); } else { aliasesVersion = 1; } state = State.fromId(in.readByte()); - if (in.getVersion().onOrAfter(SETTING_DIFF_VERSION)) { + if (in.getTransportVersion().onOrAfter(SETTING_DIFF_VERSION)) { settings = null; settingsDiff = Settings.readSettingsDiffFromStream(in); } else { @@ -1510,13 +1515,13 @@ private static class IndexMetadataDiff implements Diff { DiffableUtils.getStringKeySerializer(), ROLLOVER_INFO_DIFF_VALUE_READER ); - if 
(in.getVersion().onOrAfter(SYSTEM_INDEX_FLAG_ADDED)) { + if (in.getTransportVersion().onOrAfter(SYSTEM_INDEX_FLAG_ADDED)) { isSystem = in.readBoolean(); } else { isSystem = false; } timestampRange = IndexLongFieldRange.readFrom(in); - if (in.getVersion().onOrAfter(STATS_AND_FORECAST_ADDED)) { + if (in.getTransportVersion().onOrAfter(STATS_AND_FORECAST_ADDED)) { stats = in.readOptionalWriteable(IndexMetadataStats::new); indexWriteLoadForecast = in.readOptionalDouble(); shardSizeInBytesForecast = in.readOptionalLong(); @@ -1534,13 +1539,13 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(version); out.writeVLong(mappingVersion); out.writeVLong(settingsVersion); - if (out.getVersion().onOrAfter(Version.V_7_2_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_2_0)) { out.writeVLong(aliasesVersion); } out.writeByte(state.id); assert settings != null : "settings should always be non-null since this instance is not expected to have been read from another node"; - if (out.getVersion().onOrAfter(SETTING_DIFF_VERSION)) { + if (out.getTransportVersion().onOrAfter(SETTING_DIFF_VERSION)) { settingsDiff.writeTo(out); } else { settings.writeTo(out); @@ -1551,11 +1556,11 @@ public void writeTo(StreamOutput out) throws IOException { customData.writeTo(out); inSyncAllocationIds.writeTo(out); rolloverInfos.writeTo(out); - if (out.getVersion().onOrAfter(SYSTEM_INDEX_FLAG_ADDED)) { + if (out.getTransportVersion().onOrAfter(SYSTEM_INDEX_FLAG_ADDED)) { out.writeBoolean(isSystem); } timestampRange.writeTo(out); - if (out.getVersion().onOrAfter(STATS_AND_FORECAST_ADDED)) { + if (out.getTransportVersion().onOrAfter(STATS_AND_FORECAST_ADDED)) { out.writeOptionalWriteable(stats); out.writeOptionalDouble(indexWriteLoadForecast); out.writeOptionalLong(shardSizeInBytesForecast); @@ -1589,7 +1594,7 @@ public IndexMetadata apply(IndexMetadata part) { builder.stats(stats); builder.indexWriteLoadForecast(indexWriteLoadForecast); builder.shardSizeInBytesForecast(shardSizeInBytesForecast); - return builder.build(); + return builder.build(true); } } @@ -1607,7 +1612,7 @@ public static IndexMetadata readFrom(StreamInput in, @Nullable Function DiffableUtils.StringSetValueSerializer.getInstance().write(v, o) ); out.writeCollection(rolloverInfos.values()); - if (out.getVersion().onOrAfter(SYSTEM_INDEX_FLAG_ADDED)) { + if (out.getTransportVersion().onOrAfter(SYSTEM_INDEX_FLAG_ADDED)) { out.writeBoolean(isSystem); } timestampRange.writeTo(out); - if (out.getVersion().onOrAfter(STATS_AND_FORECAST_ADDED)) { + if (out.getTransportVersion().onOrAfter(STATS_AND_FORECAST_ADDED)) { out.writeOptionalWriteable(stats); out.writeOptionalDouble(writeLoadForecast); out.writeOptionalLong(shardSizeInBytesForecast); @@ -2012,6 +2017,11 @@ public Builder shardSizeInBytesForecast(Long shardSizeInBytesForecast) { } public IndexMetadata build() { + return build(false); + } + + // package private for testing + IndexMetadata build(boolean repair) { /* * We expect that the metadata has been properly built to set the number of shards and the number of replicas, and do not rely * on the default values here. Those must have been set upstream. 
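The IndexMetadata hunks above repeatedly swap Version checks for TransportVersion checks around stream reads and writes. The pattern is symmetric: the same constant gates both the read side and the write side, so nodes on older transport versions never see the new field. Below is a minimal sketch of that pattern with an invented SampleWireObject and forecast field; only StreamInput/StreamOutput and TransportVersion calls already used in these hunks appear.

import org.elasticsearch.TransportVersion;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

// Illustrative sketch of version-gated wire serialization.
class SampleWireObject {
    // Hypothetical field introduced in 8.6; gated on the matching TransportVersion.
    private static final TransportVersion FORECAST_ADDED = TransportVersion.V_8_6_0;

    private final Long forecast; // may be null when talking to older nodes

    SampleWireObject(StreamInput in) throws IOException {
        if (in.getTransportVersion().onOrAfter(FORECAST_ADDED)) {
            forecast = in.readOptionalLong();
        } else {
            forecast = null; // older senders never wrote this field
        }
    }

    void writeTo(StreamOutput out) throws IOException {
        if (out.getTransportVersion().onOrAfter(FORECAST_ADDED)) {
            out.writeOptionalLong(forecast);
        }
    }
}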
@@ -2140,7 +2150,16 @@ public IndexMetadata build() { var aliasesMap = aliases.build(); for (AliasMetadata alias : aliasesMap.values()) { if (alias.alias().equals(index)) { - throw new IllegalArgumentException("alias name [" + index + "] self-conflicts with index name"); + if (repair && indexCreatedVersion.equals(Version.V_8_5_0)) { + var updatedBuilder = ImmutableOpenMap.builder(aliasesMap); + final var brokenAlias = updatedBuilder.remove(index); + final var fixedAlias = AliasMetadata.newAliasMetadata(brokenAlias, index + "-alias-corrupted-by-8-5"); + aliasesMap = updatedBuilder.fPut(fixedAlias.getAlias(), fixedAlias).build(); + logger.warn("Repaired corrupted alias with the same name as its index for [{}]", index); + break; + } else { + throw new IllegalArgumentException("alias name [" + index + "] self-conflicts with index name"); + } } } @@ -2463,7 +2482,7 @@ public static IndexMetadata fromXContent(XContentParser parser, Map dataStreamNames(ClusterState state, IndicesOptions options, getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate() ); - if (indexExpressions == null || indexExpressions.length == 0) { - indexExpressions = new String[] { "*" }; - } - - final Collection expressions = resolveExpressions(Arrays.asList(indexExpressions), context); + final Collection expressions = resolveExpressions(context, indexExpressions); return expressions.stream() .map(x -> state.metadata().getIndicesLookup().get(x)) .filter(Objects::nonNull) @@ -219,13 +216,10 @@ public IndexAbstraction resolveWriteIndexAbstraction(ClusterState state, DocWrit getNetNewSystemIndexPredicate() ); - final Collection expressions = resolveExpressions(List.of(request.index()), context); + final Collection expressions = resolveExpressions(context, request.index()); if (expressions.size() == 1) { IndexAbstraction ia = state.metadata().getIndicesLookup().get(expressions.iterator().next()); - if (ia == null) { - throw new IndexNotFoundException(expressions.iterator().next()); - } if (ia.getType() == Type.ALIAS) { Index writeIndex = ia.getWriteIndex(); if (writeIndex == null) { @@ -247,8 +241,28 @@ public IndexAbstraction resolveWriteIndexAbstraction(ClusterState state, DocWrit } } - private static Collection resolveExpressions(List expressions, Context context) { - return WildcardExpressionResolver.resolve(context, DateMathExpressionResolver.resolve(context, expressions)); + protected static Collection resolveExpressions(Context context, String... expressions) { + if (context.getOptions().expandWildcardExpressions() == false) { + if (expressions == null || expressions.length == 0 || expressions.length == 1 && Metadata.ALL.equals(expressions[0])) { + return List.of(); + } else { + return ExplicitResourceNameFilter.filterUnavailable( + context, + DateMathExpressionResolver.resolve(context, List.of(expressions)) + ); + } + } else { + if (expressions == null + || expressions.length == 0 + || expressions.length == 1 && (Metadata.ALL.equals(expressions[0]) || Regex.isMatchAllPattern(expressions[0]))) { + return WildcardExpressionResolver.resolveAll(context); + } else { + return WildcardExpressionResolver.resolve( + context, + ExplicitResourceNameFilter.filterUnavailable(context, DateMathExpressionResolver.resolve(context, List.of(expressions))) + ); + } + } } /** @@ -320,47 +334,13 @@ String[] concreteIndexNames(Context context, String... indexExpressions) { } Index[] concreteIndices(Context context, String... 
indexExpressions) { - IndicesOptions options = context.getOptions(); - if (indexExpressions == null || indexExpressions.length == 0) { - indexExpressions = new String[] { Metadata.ALL }; - } else { - if (options.ignoreUnavailable() == false) { - List crossClusterIndices = Arrays.stream(indexExpressions).filter(index -> index.contains(":")).toList(); - if (crossClusterIndices.size() > 0) { - throw new IllegalArgumentException( - "Cross-cluster calls are not supported in this context but remote indices " - + "were requested: " - + crossClusterIndices - ); - } - } - } - - final Collection expressions = resolveExpressions(Arrays.asList(indexExpressions), context); + final Collection expressions = resolveExpressions(context, indexExpressions); - if (expressions.isEmpty() || (expressions.size() == 1 && expressions.iterator().next().equals(Metadata.ALL))) { - if (options.allowNoIndices() == false) { - throw notFoundException(indexExpressions); - } else { - return Index.EMPTY_ARRAY; - } - } - - final Set concreteIndices = Sets.newLinkedHashSetWithExpectedSize(expressions.size()); - final SortedMap indicesLookup = context.state.metadata().getIndicesLookup(); + final Set concreteIndicesResult = Sets.newLinkedHashSetWithExpectedSize(expressions.size()); + final Map indicesLookup = context.getState().metadata().getIndicesLookup(); for (String expression : expressions) { - if (options.ignoreUnavailable() == false) { - ensureAliasOrIndexExists(context, expression); - } - IndexAbstraction indexAbstraction = indicesLookup.get(expression); - if (indexAbstraction == null) { - continue; - } else if (indexAbstraction.getType() == Type.ALIAS && context.getOptions().ignoreAliases()) { - continue; - } else if (indexAbstraction.isDataStreamRelated() && context.includeDataStreams() == false) { - continue; - } - + final IndexAbstraction indexAbstraction = indicesLookup.get(expression); + assert indexAbstraction != null; if (indexAbstraction.getType() == Type.ALIAS && context.isResolveToWriteIndex()) { Index writeIndex = indexAbstraction.getWriteIndex(); if (writeIndex == null) { @@ -373,15 +353,15 @@ Index[] concreteIndices(Context context, String... indexExpressions) { ); } if (addIndex(writeIndex, null, context)) { - concreteIndices.add(writeIndex); + concreteIndicesResult.add(writeIndex); } } else if (indexAbstraction.getType() == Type.DATA_STREAM && context.isResolveToWriteIndex()) { Index writeIndex = indexAbstraction.getWriteIndex(); if (addIndex(writeIndex, null, context)) { - concreteIndices.add(writeIndex); + concreteIndicesResult.add(writeIndex); } } else { - if (indexAbstraction.getIndices().size() > 1 && options.allowAliasesToMultipleIndices() == false) { + if (indexAbstraction.getIndices().size() > 1 && context.getOptions().allowAliasesToMultipleIndices() == false) { String[] indexNames = new String[indexAbstraction.getIndices().size()]; int i = 0; for (Index indexName : indexAbstraction.getIndices()) { @@ -398,18 +378,18 @@ Index[] concreteIndices(Context context, String... 
indexExpressions) { } for (Index index : indexAbstraction.getIndices()) { - if (shouldTrackConcreteIndex(context, options, index)) { - concreteIndices.add(index); + if (shouldTrackConcreteIndex(context, context.getOptions(), index)) { + concreteIndicesResult.add(index); } } } } - if (options.allowNoIndices() == false && concreteIndices.isEmpty()) { + if (context.getOptions().allowNoIndices() == false && concreteIndicesResult.isEmpty()) { throw notFoundException(indexExpressions); } - checkSystemIndexAccess(context, concreteIndices); - return concreteIndices.toArray(Index.EMPTY_ARRAY); + checkSystemIndexAccess(context, concreteIndicesResult); + return concreteIndicesResult.toArray(Index.EMPTY_ARRAY); } private void checkSystemIndexAccess(Context context, Set concreteIndices) { @@ -459,13 +439,14 @@ private void checkSystemIndexAccess(Context context, Set concreteIndices) } private static IndexNotFoundException notFoundException(String... indexExpressions) { - IndexNotFoundException infe; - if (indexExpressions != null && indexExpressions.length == 1) { - if (Metadata.ALL.equals(indexExpressions[0])) { - infe = new IndexNotFoundException("no indices exist", indexExpressions[0]); - } else { - infe = new IndexNotFoundException(indexExpressions[0]); - } + final IndexNotFoundException infe; + if (indexExpressions == null + || indexExpressions.length == 0 + || (indexExpressions.length == 1 && Metadata.ALL.equals(indexExpressions[0]))) { + infe = new IndexNotFoundException("no indices exist", Metadata.ALL); + infe.setResources("index_or_alias", Metadata.ALL); + } else if (indexExpressions.length == 1) { + infe = new IndexNotFoundException(indexExpressions[0]); infe.setResources("index_or_alias", indexExpressions[0]); } else { infe = new IndexNotFoundException((String) null); @@ -474,24 +455,6 @@ private static IndexNotFoundException notFoundException(String... indexExpressio return infe; } - @Nullable - private static void ensureAliasOrIndexExists(Context context, String expression) { - IndexAbstraction indexAbstraction = context.getState().getMetadata().getIndicesLookup().get(expression); - if (indexAbstraction == null) { - throw notFoundException(expression); - } - // treat aliases as unavailable indices when ignoreAliases is set to true (e.g. delete index and update aliases api) - if (indexAbstraction.getType() == Type.ALIAS && context.getOptions().ignoreAliases()) { - throw aliasesNotSupportedException(expression); - } - if (indexAbstraction.isDataStreamRelated() && context.includeDataStreams() == false) { - IndexNotFoundException infe = notFoundException(expression); - // Allows callers to handle IndexNotFoundException differently based on whether data streams were excluded. - infe.addMetadata(EXCLUDED_DATA_STREAMS_KEY, "true"); - throw infe; - } - } - private static boolean shouldTrackConcreteIndex(Context context, IndicesOptions options, Index index) { if (context.systemIndexAccessLevel == SystemIndexAccessLevel.BACKWARDS_COMPATIBLE_ONLY && context.netNewSystemIndexPredicate.test(index.getName())) { @@ -652,17 +615,32 @@ public static String resolveDateMathExpression(String dateExpression, long time) * Resolve an array of expressions to the set of indices and aliases that these expressions match. */ public Set resolveExpressions(ClusterState state, String... expressions) { + return resolveExpressions(state, IndicesOptions.lenientExpandOpen(), false, expressions); + } + + /** + * Resolve the expression to the set of indices, aliases, and, optionally, datastreams that the expression matches. 
+ * If {@param preserveDataStreams} is {@code true}, datastreams that are covered by the wildcards from the + * {@param expressions} are returned as-is, without expanding them further to their respective backing indices. + */ + public Set resolveExpressions( + ClusterState state, + IndicesOptions indicesOptions, + boolean preserveDataStreams, + String... expressions + ) { Context context = new Context( state, - IndicesOptions.lenientExpandOpen(), + indicesOptions, true, false, true, + preserveDataStreams, getSystemIndexAccessLevel(), getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate() ); - Collection resolved = resolveExpressions(Arrays.asList(expressions), context); + Collection resolved = resolveExpressions(context, expressions); if (resolved instanceof Set) { // unmodifiable without creating a new collection as it might contain many items return Collections.unmodifiableSet((Set) resolved); @@ -789,10 +767,7 @@ public Map> resolveSearchRouting(ClusterState state, @Nullab getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate() ); - final Collection resolvedExpressions = resolveExpressions( - expressions != null ? Arrays.asList(expressions) : Collections.emptyList(), - context - ); + final Collection resolvedExpressions = resolveExpressions(context, expressions); // TODO: it appears that this can never be true? if (isAllIndices(resolvedExpressions)) { @@ -1128,25 +1103,11 @@ private WildcardExpressionResolver() { // Utility class } - /** - * Returns a collection of resource names given the {@param expressions} which contains wildcards and exclusions. - */ - public static Collection resolve(Context context, List expressions) { - Objects.requireNonNull(expressions); - if (context.getOptions().expandWildcardExpressions() == false) { - return expressions; - } else if (isEmptyOrTrivialWildcard(expressions)) { - return innerResolveAll(context); - } else { - return innerResolve(context, expressions); - } - } - /** * Returns all the indices and all the datastreams, considering the open/closed, system, and hidden context parameters. * Depending on the context, returns the names of the datastreams themselves or their backing indices. */ - private static Collection innerResolveAll(Context context) { + public static Collection resolveAll(Context context) { List resolvedExpressions = resolveEmptyOrTrivialWildcard(context); if (context.includeDataStreams() == false) { return resolvedExpressions; @@ -1173,8 +1134,7 @@ private static Collection innerResolveAll(Context context) { /** * Returns all the existing resource (index, alias and datastream) names that the {@param expressions} list resolves to. - * The passed-in {@param expressions} can contain wildcards and exclusions, as well as plain resource names, - * but it mustn't be empty. + * The passed-in {@param expressions} can contain wildcards and exclusions, as well as plain resource names. *
* The return is a {@code Collection} (usually a {@code Set} but can also be a {@code List}, for performance reasons) of plain * resource names only. All the returned resources are "accessible", in the given context, i.e. the resources exist @@ -1187,80 +1147,39 @@ private static Collection innerResolveAll(Context context) { * ultimately returned, instead of the alias or datastream name * */ - private static Collection innerResolve(Context context, List expressions) { - if (Objects.requireNonNull(expressions).isEmpty()) { - throw new IllegalStateException("Cannot resolve empty index expression"); + public static Collection resolve(Context context, List expressions) { + ExpressionList expressionList = new ExpressionList(context, expressions); + // fast exit if there are no wildcards to evaluate + if (expressionList.hasWildcard() == false) { + return expressions; } - Collection result = null; - boolean wildcardSeen = false; - for (int i = 0; i < expressions.size(); i++) { - String expression = validateAliasOrIndex(expressions.get(i)); - boolean isExclusion = false; - if (expression.charAt(0) == '-' && wildcardSeen) { - isExclusion = true; - expression = expression.substring(1); - } - if (Regex.isSimpleMatchPattern(expression)) { - wildcardSeen = true; - Stream matchingResources = matchResourcesToWildcard(context, expression); + Set result = new HashSet<>(); + for (ExpressionList.Expression expression : expressionList) { + if (expression.isWildcard()) { + Stream matchingResources = matchResourcesToWildcard(context, expression.get()); Stream matchingOpenClosedNames = expandToOpenClosed(context, matchingResources); AtomicBoolean emptyWildcardExpansion = new AtomicBoolean(false); if (context.getOptions().allowNoIndices() == false) { emptyWildcardExpansion.set(true); matchingOpenClosedNames = matchingOpenClosedNames.peek(x -> emptyWildcardExpansion.set(false)); } - if (result == null) { - // add all the previous expressions because they exist but were not added, as an optimisation - result = new HashSet<>(expressions.subList(0, i)); - } - if (isExclusion) { + if (expression.isExclusion()) { matchingOpenClosedNames.forEachOrdered(result::remove); } else { matchingOpenClosedNames.forEachOrdered(result::add); } if (emptyWildcardExpansion.get()) { - throw notFoundException(expression); + throw notFoundException(expression.get()); } } else { - if (isExclusion) { - if (result == null) { - // add all the previous expressions because they exist but were not added, as an optimisation - result = new HashSet<>(expressions.subList(0, i)); - } - result.remove(expression); + if (expression.isExclusion()) { + result.remove(expression.get()); } else { - // missing expression that is neither an exclusion nor a wildcard - // TODO investigate if this check can be moved outside the wildcard resolver - if (context.getOptions().ignoreUnavailable() == false) { - ensureAliasOrIndexExists(context, expression); - } - if (result != null) { - // skip adding the expression as an optimization - result.add(expression); - } + result.add(expression.get()); } } } - if (result == null) { - // optimisation that avoids allocating a new collection when all the argument expressions are explicit names - return expressions; - } else { - return result; - } - } - - private static String validateAliasOrIndex(String expression) { - if (Strings.isEmpty(expression)) { - throw notFoundException(expression); - } - // Expressions can not start with an underscore. This is reserved for APIs. 
If the check gets here, the API - // does not exist and the path is interpreted as an expression. If the expression begins with an underscore, - // throw a specific error that is different from the [[IndexNotFoundException]], which is typically thrown - // if the expression can't be found. - if (expression.charAt(0) == '_') { - throw new InvalidIndexNameException(expression, "must not start with '_'."); - } - return expression; + return result; } private static IndexMetadata.State excludeState(IndicesOptions options) { @@ -1287,7 +1206,7 @@ private static IndexMetadata.State excludeState(IndicesOptions options) { * It does NOT consider the open or closed status of index resources. */ private static Stream matchResourcesToWildcard(Context context, String wildcardExpression) { - assert Regex.isSimpleMatchPattern(wildcardExpression); + assert isWildcard(wildcardExpression); final SortedMap indicesLookup = context.getState().getMetadata().getIndicesLookup(); Stream matchesStream; if (Regex.isSuffixMatchPattern(wildcardExpression)) { @@ -1364,11 +1283,6 @@ private static Stream expandToOpenClosed(Context context, Stream expressions) { - return expressions.isEmpty() - || (expressions.size() == 1 && (Metadata.ALL.equals(expressions.get(0)) || Regex.isMatchAllPattern(expressions.get(0)))); - } - private static List resolveEmptyOrTrivialWildcard(Context context) { final String[] allIndices = resolveEmptyOrTrivialWildcardToAllIndices(context.getOptions(), context.getState().metadata()); if (context.systemIndexAccessLevel == SystemIndexAccessLevel.ALL) { @@ -1434,19 +1348,10 @@ private DateMathExpressionResolver() { // utility class } - public static List resolve(final Context context, List expressions) { + public static List resolve(Context context, List expressions) { List result = new ArrayList<>(expressions.size()); - boolean wildcardSeen = false; - for (String expression : expressions) { - // accepts date-math exclusions that are of the form "-<...{}>", i.e. the "-" is outside the "<>" date-math template - if (Strings.hasLength(expression) && expression.charAt(0) == '-' && wildcardSeen) { - result.add("-" + resolveExpression(expression.substring(1), context::getStartTime)); - } else { - result.add(resolveExpression(expression, context::getStartTime)); - } - if (context.getOptions().expandWildcardExpressions() && Regex.isSimpleMatchPattern(expression)) { - wildcardSeen = true; - } + for (ExpressionList.Expression expression : new ExpressionList(context, expressions)) { + result.add(resolveExpression(expression, context::getStartTime)); } return result; } @@ -1455,6 +1360,15 @@ static String resolveExpression(String expression) { return resolveExpression(expression, System::currentTimeMillis); } + static String resolveExpression(ExpressionList.Expression expression, LongSupplier getTime) { + if (expression.isExclusion()) { + // accepts date-math exclusions that are of the form "-<...{}>", i.e. 
the "-" is outside the "<>" date-math template + return "-" + resolveExpression(expression.get(), getTime); + } else { + return resolveExpression(expression.get(), getTime); + } + } + @SuppressWarnings("fallthrough") static String resolveExpression(String expression, LongSupplier getTime) { if (expression.startsWith(EXPRESSION_LEFT_BOUND) == false || expression.endsWith(EXPRESSION_RIGHT_BOUND) == false) { @@ -1601,6 +1515,141 @@ static String resolveExpression(String expression, LongSupplier getTime) { } } + public static final class ExplicitResourceNameFilter { + + private ExplicitResourceNameFilter() { + // Utility class + } + + /** + * Returns an expression list with "unavailable" (missing or not acceptable) resource names filtered out. + * Only explicit resource names are considered for filtering. Wildcard and exclusion expressions are kept in. + */ + public static List filterUnavailable(Context context, List expressions) { + ensureRemoteIndicesRequireIgnoreUnavailable(context.getOptions(), expressions); + List result = new ArrayList<>(expressions.size()); + for (ExpressionList.Expression expression : new ExpressionList(context, expressions)) { + validateAliasOrIndex(expression); + if (expression.isWildcard() || expression.isExclusion() || ensureAliasOrIndexExists(context, expression.get())) { + result.add(expression.expression()); + } + } + return result; + } + + /** + * This returns `true` if the given {@param name} is of a resource that exists. + * Otherwise, it returns `false` if the `ignore_unvailable` option is `true`, or, if `false`, it throws a "not found" type of + * exception. + */ + @Nullable + private static boolean ensureAliasOrIndexExists(Context context, String name) { + boolean ignoreUnavailable = context.getOptions().ignoreUnavailable(); + IndexAbstraction indexAbstraction = context.getState().getMetadata().getIndicesLookup().get(name); + if (indexAbstraction == null) { + if (ignoreUnavailable) { + return false; + } else { + throw notFoundException(name); + } + } + // treat aliases as unavailable indices when ignoreAliases is set to true (e.g. delete index and update aliases api) + if (indexAbstraction.getType() == Type.ALIAS && context.getOptions().ignoreAliases()) { + if (ignoreUnavailable) { + return false; + } else { + throw aliasesNotSupportedException(name); + } + } + if (indexAbstraction.isDataStreamRelated() && context.includeDataStreams() == false) { + if (ignoreUnavailable) { + return false; + } else { + IndexNotFoundException infe = notFoundException(name); + // Allows callers to handle IndexNotFoundException differently based on whether data streams were excluded. + infe.addMetadata(EXCLUDED_DATA_STREAMS_KEY, "true"); + throw infe; + } + } + return true; + } + + private static void validateAliasOrIndex(ExpressionList.Expression expression) { + if (Strings.isEmpty(expression.expression())) { + throw notFoundException(expression.expression()); + } + // Expressions can not start with an underscore. This is reserved for APIs. If the check gets here, the API + // does not exist and the path is interpreted as an expression. If the expression begins with an underscore, + // throw a specific error that is different from the [[IndexNotFoundException]], which is typically thrown + // if the expression can't be found. 
+ if (expression.expression().charAt(0) == '_') { + throw new InvalidIndexNameException(expression.expression(), "must not start with '_'."); + } + } + + private static void ensureRemoteIndicesRequireIgnoreUnavailable(IndicesOptions options, List indexExpressions) { + if (options.ignoreUnavailable() == false) { + List crossClusterIndices = indexExpressions.stream().filter(index -> index.contains(":")).toList(); + if (crossClusterIndices.size() > 0) { + throw new IllegalArgumentException( + "Cross-cluster calls are not supported in this context but remote indices were requested: " + crossClusterIndices + ); + } + } + } + } + + /** + * Used to iterate expression lists and work out which expression item is a wildcard or an exclusion. + */ + public static final class ExpressionList implements Iterable { + private final List expressionsList; + private final boolean hasWildcard; + + public record Expression(String expression, boolean isWildcard, boolean isExclusion) { + public String get() { + if (isExclusion()) { + // drop the leading "-" if exclusion because it is easier for callers to handle it like this + return expression().substring(1); + } else { + return expression(); + } + } + } + + /** + * Creates the expression iterable that can be used to easily check which expression item is a wildcard or an exclusion (or both). + * The {@param context} is used to check if wildcards ought to be considered or not. + */ + public ExpressionList(Context context, List expressionStrings) { + List expressionsList = new ArrayList<>(expressionStrings.size()); + boolean wildcardSeen = false; + for (String expressionString : expressionStrings) { + boolean isExclusion = expressionString.startsWith("-") && wildcardSeen; + if (context.getOptions().expandWildcardExpressions() && isWildcard(expressionString)) { + wildcardSeen = true; + expressionsList.add(new Expression(expressionString, true, isExclusion)); + } else { + expressionsList.add(new Expression(expressionString, false, isExclusion)); + } + } + this.expressionsList = expressionsList; + this.hasWildcard = wildcardSeen; + } + + /** + * Returns {@code true} if the expression contains any wildcard and the options allow wildcard expansion + */ + public boolean hasWildcard() { + return this.hasWildcard; + } + + @Override + public Iterator iterator() { + return expressionsList.iterator(); + } + } + /** * This is a context for the DateMathExpressionResolver which does not require {@code IndicesOptions} or {@code ClusterState} * since it uses only the start time to resolve expressions. @@ -1624,4 +1673,8 @@ public IndicesOptions getOptions() { throw new UnsupportedOperationException("should never be called"); } } + + private static boolean isWildcard(String expression) { + return Regex.isSimpleMatchPattern(expression); + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java index e5f37ab537ce..dfc05552d3e7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java @@ -315,14 +315,14 @@ public IndexTemplateMetadata build() { * This method is used for serializing templates before storing them in the cluster metadata, * and also in the REST layer when returning a deprecated typed response. 
*/ - public static void toXContentWithTypes( + public static XContentBuilder toXContentWithTypes( IndexTemplateMetadata indexTemplateMetadata, XContentBuilder builder, ToXContent.Params params ) throws IOException { builder.startObject(indexTemplateMetadata.name()); toInnerXContent(indexTemplateMetadata, builder, params, true); - builder.endObject(); + return builder.endObject(); } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java index 17f1b75c2e54..d102624ccb06 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java @@ -9,7 +9,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.compress.CompressedXContent; @@ -75,7 +75,7 @@ public MappingMetadata(String type, Map mapping) { } public static void writeMappingMetadata(StreamOutput out, Map mappings) throws IOException { - out.writeMap(mappings, StreamOutput::writeString, out.getVersion().before(Version.V_8_0_0) ? (o, v) -> { + out.writeMap(mappings, StreamOutput::writeString, out.getTransportVersion().before(TransportVersion.V_8_0_0) ? (o, v) -> { o.writeVInt(v == EMPTY_MAPPINGS ? 0 : 1); if (v != EMPTY_MAPPINGS) { o.writeString(MapperService.SINGLE_MAPPING_NAME); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 21dd59e79199..4d19109738a8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.Diff; @@ -22,11 +23,13 @@ import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.coordination.CoordinationMetadata; +import org.elasticsearch.cluster.coordination.PublicationTransportHandler; import org.elasticsearch.cluster.metadata.IndexAbstraction.ConcreteIndex; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.VersionedNamedWriteable; @@ -37,6 +40,8 @@ import org.elasticsearch.common.util.ArrayUtils; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.core.Nullable; @@ -51,7 +56,6 @@ import 
org.elasticsearch.xcontent.NamedObjectNotFoundException; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -91,7 +95,7 @@ * The details of how this is persisted are covered in {@link org.elasticsearch.gateway.PersistedClusterStateService}. *

*/ -public class Metadata extends AbstractCollection implements Diffable, ToXContentFragment { +public class Metadata extends AbstractCollection implements Diffable, ChunkedToXContent { private static final Logger logger = LogManager.getLogger(Metadata.class); @@ -137,7 +141,7 @@ public enum XContentContext { * Custom metadata that persists (via XContent) across restarts. The deserialization method for each implementation must be registered * with the {@link NamedXContentRegistry}. */ - public interface Custom extends NamedDiffable, ToXContentFragment { + public interface Custom extends NamedDiffable, ChunkedToXContent { EnumSet context(); @@ -279,6 +283,33 @@ private Metadata( this.mappingsByHash = mappingsByHash; this.oldestIndexVersion = oldestIndexVersion; this.reservedStateMetadata = reservedStateMetadata; + assert assertConsistent(); + } + + private boolean assertConsistent() { + final var lookup = this.indicesLookup; + final var dsMetadata = custom(DataStreamMetadata.TYPE, DataStreamMetadata.EMPTY); + assert lookup == null || lookup.equals(Builder.buildIndicesLookup(dsMetadata, indices)); + try { + Builder.ensureNoNameCollisions(aliasedIndices.keySet(), indices, dsMetadata); + } catch (Exception e) { + assert false : e; + } + assert Builder.assertDataStreams(indices, dsMetadata); + assert Set.of(allIndices).equals(indices.keySet()); + final Function, Set> indicesByPredicate = predicate -> indices.entrySet() + .stream() + .filter(entry -> predicate.test(entry.getValue())) + .map(Map.Entry::getKey) + .collect(Collectors.toUnmodifiableSet()); + assert Set.of(allOpenIndices).equals(indicesByPredicate.apply(idx -> idx.getState() == IndexMetadata.State.OPEN)); + assert Set.of(allClosedIndices).equals(indicesByPredicate.apply(idx -> idx.getState() == IndexMetadata.State.CLOSE)); + assert Set.of(visibleIndices).equals(indicesByPredicate.apply(idx -> idx.isHidden() == false)); + assert Set.of(visibleOpenIndices) + .equals(indicesByPredicate.apply(idx -> idx.isHidden() == false && idx.getState() == IndexMetadata.State.OPEN)); + assert Set.of(visibleClosedIndices) + .equals(indicesByPredicate.apply(idx -> idx.isHidden() == false && idx.getState() == IndexMetadata.State.CLOSE)); + return true; } public Metadata withIncrementedVersion() { @@ -440,6 +471,42 @@ public Metadata withCoordinationMetadata(CoordinationMetadata coordinationMetada ); } + public Metadata withLastCommittedValues( + boolean clusterUUIDCommitted, + CoordinationMetadata.VotingConfiguration lastCommittedConfiguration + ) { + if (clusterUUIDCommitted == this.clusterUUIDCommitted + && lastCommittedConfiguration.equals(this.coordinationMetadata.getLastCommittedConfiguration())) { + return this; + } + return new Metadata( + clusterUUID, + clusterUUIDCommitted, + version, + CoordinationMetadata.builder(coordinationMetadata).lastCommittedConfiguration(lastCommittedConfiguration).build(), + transientSettings, + persistentSettings, + settings, + hashesOfConsistentSettings, + totalNumberOfShards, + totalOpenIndexShards, + indices, + aliasedIndices, + templates, + customs, + allIndices, + visibleIndices, + allOpenIndices, + visibleOpenIndices, + allClosedIndices, + visibleClosedIndices, + indicesLookup, + mappingsByHash, + oldestIndexVersion, + reservedStateMetadata + ); + } + /** * Creates a copy of this instance updated with the given {@link IndexMetadata} that must only contain changes to primary terms * and in-sync allocation ids relative to the existing entries. 
This method is only used by @@ -1297,6 +1364,9 @@ public static boolean isGlobalStateEquals(Metadata metadata1, Metadata metadata2 if (customCount1 != customCount2) { return false; } + if (Objects.equals(metadata1.reservedStateMetadata, metadata2.reservedStateMetadata) == false) { + return false; + } return true; } @@ -1306,7 +1376,7 @@ public Diff diff(Metadata previousState) { } public static Diff readDiffFrom(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(MetadataDiff.NOOP_METADATA_DIFF_VERSION) && in.readBoolean()) { + if (in.getTransportVersion().onOrAfter(MetadataDiff.NOOP_METADATA_DIFF_VERSION) && in.readBoolean()) { return SimpleDiffable.empty(); } return new MetadataDiff(in); @@ -1317,9 +1387,55 @@ public static Metadata fromXContent(XContentParser parser) throws IOException { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - Builder.toXContent(this, builder, params); - return builder; + public Iterator toXContentChunked(ToXContent.Params p) { + XContentContext context = XContentContext.valueOf(p.param(CONTEXT_MODE_PARAM, CONTEXT_MODE_API)); + final Iterator start = context == XContentContext.API + ? ChunkedToXContentHelper.startObject("metadata") + : Iterators.single((builder, params) -> builder.startObject("meta-data").field("version", version())); + + final Iterator persistentSettings = context != XContentContext.API && persistentSettings().isEmpty() == false + ? Iterators.single((builder, params) -> { + builder.startObject("settings"); + persistentSettings().toXContent(builder, new ToXContent.MapParams(Collections.singletonMap("flat_settings", "true"))); + return builder.endObject(); + }) + : Collections.emptyIterator(); + + final Iterator indices = context == XContentContext.API + ? ChunkedToXContentHelper.wrapWithObject("indices", indices().values().iterator()) + : Collections.emptyIterator(); + + return Iterators.concat(start, Iterators.single((builder, params) -> { + builder.field("cluster_uuid", clusterUUID); + builder.field("cluster_uuid_committed", clusterUUIDCommitted); + builder.startObject("cluster_coordination"); + coordinationMetadata().toXContent(builder, params); + return builder.endObject(); + }), + persistentSettings, + ChunkedToXContentHelper.wrapWithObject( + "templates", + templates().values() + .stream() + .map( + template -> (ToXContent) (builder, params) -> IndexTemplateMetadata.Builder.toXContentWithTypes( + template, + builder, + params + ) + ) + .iterator() + ), + indices, + Iterators.flatMap( + customs.entrySet().iterator(), + entry -> entry.getValue().context().contains(context) + ? 
ChunkedToXContentHelper.wrapWithObject(entry.getKey(), entry.getValue().toXContentChunked(p)) + : Collections.emptyIterator() + ), + ChunkedToXContentHelper.wrapWithObject("reserved_state", reservedStateMetadata().values().iterator()), + ChunkedToXContentHelper.endObject() + ); } public Map getMappingsByHash() { @@ -1328,7 +1444,9 @@ public Map getMappingsByHash() { private static class MetadataDiff implements Diff { - private static final Version NOOP_METADATA_DIFF_VERSION = Version.V_8_5_0; + private static final TransportVersion NOOP_METADATA_DIFF_VERSION = TransportVersion.V_8_5_0; + private static final TransportVersion NOOP_METADATA_DIFF_SAFE_VERSION = + PublicationTransportHandler.INCLUDES_LAST_COMMITTED_DATA_VERSION; private final long version; private final String clusterUUID; @@ -1394,7 +1512,7 @@ private MetadataDiff(StreamInput in) throws IOException { coordinationMetadata = new CoordinationMetadata(in); transientSettings = Settings.readSettingsFromStream(in); persistentSettings = Settings.readSettingsFromStream(in); - if (in.getVersion().onOrAfter(Version.V_7_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { hashesOfConsistentSettings = DiffableStringMap.readDiffFrom(in); } else { hashesOfConsistentSettings = DiffableStringMap.DiffableStringMapDiff.EMPTY; @@ -1402,7 +1520,7 @@ private MetadataDiff(StreamInput in) throws IOException { indices = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), INDEX_METADATA_DIFF_VALUE_READER); templates = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), TEMPLATES_DIFF_VALUE_READER); customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER); - if (in.getVersion().onOrAfter(Version.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { reservedStateMetadata = DiffableUtils.readJdkMapDiff( in, DiffableUtils.getStringKeySerializer(), @@ -1415,12 +1533,15 @@ private MetadataDiff(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(NOOP_METADATA_DIFF_VERSION)) { + if (out.getTransportVersion().onOrAfter(NOOP_METADATA_DIFF_SAFE_VERSION)) { out.writeBoolean(empty); if (empty) { // noop diff return; } + } else if (out.getTransportVersion().onOrAfter(NOOP_METADATA_DIFF_VERSION)) { + // noops are not safe with these versions, see #92259 + out.writeBoolean(false); } out.writeString(clusterUUID); out.writeBoolean(clusterUUIDCommitted); @@ -1428,13 +1549,13 @@ public void writeTo(StreamOutput out) throws IOException { coordinationMetadata.writeTo(out); transientSettings.writeTo(out); persistentSettings.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_7_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { hashesOfConsistentSettings.writeTo(out); } indices.writeTo(out); templates.writeTo(out); customs.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { reservedStateMetadata.writeTo(out); } } @@ -1467,7 +1588,7 @@ public Metadata apply(Metadata part) { } } - public static final Version MAPPINGS_AS_HASH_VERSION = Version.V_8_1_0; + public static final TransportVersion MAPPINGS_AS_HASH_VERSION = TransportVersion.V_8_1_0; public static Metadata readFrom(StreamInput in) throws IOException { Builder builder = new Builder(); @@ -1477,11 +1598,11 @@ public static Metadata 
readFrom(StreamInput in) throws IOException { builder.coordinationMetadata(new CoordinationMetadata(in)); builder.transientSettings(readSettingsFromStream(in)); builder.persistentSettings(readSettingsFromStream(in)); - if (in.getVersion().onOrAfter(Version.V_7_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { builder.hashesOfConsistentSettings(DiffableStringMap.readFrom(in)); } final Function mappingLookup; - if (in.getVersion().onOrAfter(MAPPINGS_AS_HASH_VERSION)) { + if (in.getTransportVersion().onOrAfter(MAPPINGS_AS_HASH_VERSION)) { final Map mappingMetadataMap = in.readMapValues(MappingMetadata::new, MappingMetadata::getSha256); if (mappingMetadataMap.size() > 0) { mappingLookup = mappingMetadataMap::get; @@ -1504,7 +1625,7 @@ public static Metadata readFrom(StreamInput in) throws IOException { Custom customIndexMetadata = in.readNamedWriteable(Custom.class); builder.putCustom(customIndexMetadata.getWriteableName(), customIndexMetadata); } - if (in.getVersion().onOrAfter(Version.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { int reservedStateSize = in.readVInt(); for (int i = 0; i < reservedStateSize; i++) { builder.put(ReservedStateMetadata.readFrom(in)); @@ -1521,22 +1642,22 @@ public void writeTo(StreamOutput out) throws IOException { coordinationMetadata.writeTo(out); transientSettings.writeTo(out); persistentSettings.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_7_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { hashesOfConsistentSettings.writeTo(out); } // Starting in #MAPPINGS_AS_HASH_VERSION we write the mapping metadata first and then write the indices without metadata so that // we avoid writing duplicate mappings twice - if (out.getVersion().onOrAfter(MAPPINGS_AS_HASH_VERSION)) { + if (out.getTransportVersion().onOrAfter(MAPPINGS_AS_HASH_VERSION)) { out.writeMapValues(mappingsByHash); } out.writeVInt(indices.size()); - final boolean writeMappingsHash = out.getVersion().onOrAfter(MAPPINGS_AS_HASH_VERSION); + final boolean writeMappingsHash = out.getTransportVersion().onOrAfter(MAPPINGS_AS_HASH_VERSION); for (IndexMetadata indexMetadata : this) { indexMetadata.writeTo(out, writeMappingsHash); } out.writeCollection(templates.values()); VersionedNamedWriteable.writeVersionedWritables(out, customs); - if (out.getVersion().onOrAfter(Version.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { out.writeCollection(reservedStateMetadata.values()); } } @@ -2468,60 +2589,6 @@ static boolean assertDataStreams(Map indices, DataStreamM return true; } - public static void toXContent(Metadata metadata, XContentBuilder builder, ToXContent.Params params) throws IOException { - XContentContext context = XContentContext.valueOf(params.param(CONTEXT_MODE_PARAM, CONTEXT_MODE_API)); - - if (context == XContentContext.API) { - builder.startObject("metadata"); - } else { - builder.startObject("meta-data"); - builder.field("version", metadata.version()); - } - - builder.field("cluster_uuid", metadata.clusterUUID); - builder.field("cluster_uuid_committed", metadata.clusterUUIDCommitted); - - builder.startObject("cluster_coordination"); - metadata.coordinationMetadata().toXContent(builder, params); - builder.endObject(); - - if (context != XContentContext.API && metadata.persistentSettings().isEmpty() == false) { - builder.startObject("settings"); - metadata.persistentSettings().toXContent(builder, new MapParams(Collections.singletonMap("flat_settings", "true"))); - 
builder.endObject(); - } - - builder.startObject("templates"); - for (IndexTemplateMetadata template : metadata.templates().values()) { - IndexTemplateMetadata.Builder.toXContentWithTypes(template, builder, params); - } - builder.endObject(); - - if (context == XContentContext.API) { - builder.startObject("indices"); - for (IndexMetadata indexMetadata : metadata) { - IndexMetadata.Builder.toXContent(indexMetadata, builder, params); - } - builder.endObject(); - } - - for (Map.Entry cursor : metadata.customs().entrySet()) { - if (cursor.getValue().context().contains(context)) { - builder.startObject(cursor.getKey()); - cursor.getValue().toXContent(builder, params); - builder.endObject(); - } - } - - builder.startObject("reserved_state"); - for (ReservedStateMetadata ReservedStateMetadata : metadata.reservedStateMetadata().values()) { - ReservedStateMetadata.toXContent(builder, params); - } - builder.endObject(); - - builder.endObject(); - } - public static Metadata fromXContent(XContentParser parser) throws IOException { Builder builder = new Builder(); @@ -2639,7 +2706,7 @@ private void dedupeMapping(IndexMetadata.Builder indexMetadataBuilder) { Map params = Maps.newMapWithExpectedSize(2); params.put("binary", "true"); params.put(Metadata.CONTEXT_MODE_PARAM, Metadata.CONTEXT_MODE_GATEWAY); - FORMAT_PARAMS = new MapParams(params); + FORMAT_PARAMS = new ToXContent.MapParams(params); } /** @@ -2649,7 +2716,7 @@ private void dedupeMapping(IndexMetadata.Builder indexMetadataBuilder) { @Override public void toXContent(XContentBuilder builder, Metadata state) throws IOException { - Builder.toXContent(state, builder, FORMAT_PARAMS); + ChunkedToXContent.wrapAsToXContent(state).toXContent(builder, FORMAT_PARAMS); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index 99585895005e..222ec3c68915 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -31,6 +31,7 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingRoleStrategy; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.DataTier; @@ -504,7 +505,13 @@ private ClusterState applyCreateIndexWithTemporaryService( indexService.getIndexEventListener().beforeIndexAddedToCluster(indexMetadata.getIndex(), indexMetadata.getSettings()); - ClusterState updated = clusterStateCreateIndex(currentState, request.blocks(), indexMetadata, metadataTransformer); + ClusterState updated = clusterStateCreateIndex( + currentState, + request.blocks(), + indexMetadata, + metadataTransformer, + allocationService.getShardRoutingRoleStrategy() + ); if (request.performReroute()) { updated = allocationService.reroute(updated, "index [" + indexMetadata.getIndex().getName() + "] created", rerouteListener); } @@ -1219,7 +1226,8 @@ static ClusterState clusterStateCreateIndex( ClusterState currentState, Set clusterBlocks, IndexMetadata indexMetadata, - BiConsumer metadataTransformer + BiConsumer metadataTransformer, + ShardRoutingRoleStrategy shardRoutingRoleStrategy ) { final Metadata 
newMetadata; if (metadataTransformer != null) { @@ -1236,7 +1244,7 @@ static ClusterState clusterStateCreateIndex( ClusterState updatedState = ClusterState.builder(currentState).blocks(blocks).metadata(newMetadata).build(); - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable()) + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(shardRoutingRoleStrategy, updatedState.routingTable()) .addAsNew(updatedState.metadata().index(indexName)); return ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesService.java index ed53313a95ed..0ec58787dfb6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesService.java @@ -12,15 +12,20 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesClusterStateUpdateRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterStateAckListener; +import org.elasticsearch.cluster.ClusterStateTaskConfig; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; +import org.elasticsearch.cluster.ClusterStateTaskListener; +import org.elasticsearch.cluster.SimpleBatchedAckListenerTaskExecutor; import org.elasticsearch.cluster.metadata.AliasAction.NewAliasValidator; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; @@ -55,6 +60,8 @@ public class MetadataIndexAliasesService { private final NamedXContentRegistry xContentRegistry; + private final ClusterStateTaskExecutor executor; + @Inject public MetadataIndexAliasesService( ClusterService clusterService, @@ -66,20 +73,19 @@ public MetadataIndexAliasesService( this.indicesService = indicesService; this.deleteIndexService = deleteIndexService; this.xContentRegistry = xContentRegistry; - } + this.executor = new SimpleBatchedAckListenerTaskExecutor<>() { - public void indicesAliases(final IndicesAliasesClusterStateUpdateRequest request, final ActionListener listener) { - submitUnbatchedTask("index-aliases", new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { @Override - public ClusterState execute(ClusterState currentState) { - return applyAliasActions(currentState, request.actions()); + public Tuple executeTask(ApplyAliasesTask applyAliasesTask, ClusterState clusterState) { + return new Tuple<>(applyAliasActions(clusterState, applyAliasesTask.request().actions()), applyAliasesTask); } - }); + }; } - @SuppressForbidden(reason = "legacy usage of unbatched task") // TODO add support for batching here - private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String 
source, ClusterStateUpdateTask task) { - clusterService.submitUnbatchedStateUpdateTask(source, task); + public void indicesAliases(final IndicesAliasesClusterStateUpdateRequest request, final ActionListener listener) { + var task = new ApplyAliasesTask(request, listener); + var config = ClusterStateTaskConfig.build(Priority.URGENT); + clusterService.submitStateUpdateTask("index-aliases", task, config, executor); } /** @@ -198,6 +204,11 @@ public ClusterState applyAliasActions(ClusterState currentState, Iterable getExecutor() { + return executor; + } + private void validateFilter( List indicesToClose, Map indices, @@ -244,4 +255,43 @@ private static void validateAliasTargetIsNotDSBackingIndex(ClusterState currentS ); } } + + /** + * A cluster state update task that consists of the cluster state request and the listeners that need to be notified upon completion. + */ + record ApplyAliasesTask(IndicesAliasesClusterStateUpdateRequest request, ActionListener listener) + implements + ClusterStateTaskListener, + ClusterStateAckListener { + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + + @Override + public boolean mustAck(DiscoveryNode discoveryNode) { + return true; + } + + @Override + public void onAllNodesAcked() { + listener.onResponse(AcknowledgedResponse.TRUE); + } + + @Override + public void onAckFailure(Exception e) { + listener.onResponse(AcknowledgedResponse.FALSE); + } + + @Override + public void onAckTimeout() { + listener.onResponse(AcknowledgedResponse.FALSE); + } + + @Override + public TimeValue ackTimeout() { + return request.ackTimeout(); + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java index 592654ac8c12..322109b1a2d9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexStateService.java @@ -14,7 +14,6 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.action.NotifyOnceListener; import org.elasticsearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse.IndexResult; @@ -45,6 +44,7 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRoutingRoleStrategy; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.allocator.AllocationActionMultiListener; import org.elasticsearch.cluster.service.ClusterService; @@ -225,7 +225,8 @@ public ClusterState execute(BatchExecutionContext batchExecuti final Tuple> closingResult = closeRoutingTable( state, task.blockedIndices, - task.verifyResults + task.verifyResults, + allocationService.getShardRoutingRoleStrategy() ); state = closingResult.v1(); final List indices = closingResult.v2(); @@ -619,9 +620,9 @@ private void waitForShardsReadyForClosing( for (int i = 0; i < indexRoutingTable.size(); i++) { IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(i); final int shardId = shardRoutingTable.shardId().id(); - 
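// Editor's note: the MetadataIndexAliasesService hunks above replace a one-off unbatched
// AckedClusterStateUpdateTask with a task record plus a shared SimpleBatchedAckListenerTaskExecutor,
// so alias updates can be batched into a single master publication. A condensed sketch of the
// moving parts, not part of this patch: UpdateWidgetTask and applyWidget(...) are hypothetical
// names for illustration; the executor, config, and submission shapes mirror the hunks above and
// assume the server classes imported there.
final ClusterStateTaskExecutor<UpdateWidgetTask> executor = new SimpleBatchedAckListenerTaskExecutor<>() {
    @Override
    public Tuple<ClusterState, ClusterStateAckListener> executeTask(UpdateWidgetTask task, ClusterState state) {
        // Pure function of the incoming state; the framework folds all queued tasks into one update.
        return new Tuple<>(applyWidget(state, task), task);
    }
};

// The task carries its own ack/failure callbacks, so no per-request anonymous update task is needed.
record UpdateWidgetTask(ActionListener<AcknowledgedResponse> listener, TimeValue timeout)
    implements ClusterStateTaskListener, ClusterStateAckListener {
    @Override public void onFailure(Exception e) { listener.onFailure(e); }
    @Override public boolean mustAck(DiscoveryNode node) { return true; }
    @Override public void onAllNodesAcked() { listener.onResponse(AcknowledgedResponse.TRUE); }
    @Override public void onAckFailure(Exception e) { listener.onResponse(AcknowledgedResponse.FALSE); }
    @Override public void onAckTimeout() { listener.onResponse(AcknowledgedResponse.FALSE); }
    @Override public TimeValue ackTimeout() { return timeout; }
}

// Submission then mirrors indicesAliases(...) above:
// clusterService.submitStateUpdateTask("update-widget", task, ClusterStateTaskConfig.build(Priority.URGENT), executor);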
sendVerifyShardBeforeCloseRequest(shardRoutingTable, closingBlock, new NotifyOnceListener<>() { + sendVerifyShardBeforeCloseRequest(shardRoutingTable, closingBlock, ActionListener.notifyOnce(new ActionListener<>() { @Override - public void innerOnResponse(final ReplicationResponse replicationResponse) { + public void onResponse(ReplicationResponse replicationResponse) { ShardResult.Failure[] failures = Arrays.stream(replicationResponse.getShardInfo().getFailures()) .map(f -> new ShardResult.Failure(f.index(), f.shardId(), f.getCause(), f.nodeId())) .toArray(ShardResult.Failure[]::new); @@ -630,7 +631,7 @@ public void innerOnResponse(final ReplicationResponse replicationResponse) { } @Override - public void innerOnFailure(final Exception e) { + public void onFailure(Exception e) { ShardResult.Failure failure = new ShardResult.Failure(index.getName(), shardId, e); results.setOnce(shardId, new ShardResult(shardId, new ShardResult.Failure[] { failure })); processIfFinished(); @@ -641,7 +642,7 @@ private void processIfFinished() { onResponse.accept(new IndexResult(index, results.toArray(new ShardResult[results.length()]))); } } - }); + })); } } @@ -749,9 +750,9 @@ private void waitForShardsReady( for (int i = 0; i < indexRoutingTable.size(); i++) { IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(i); final int shardId = shardRoutingTable.shardId().id(); - sendVerifyShardBlockRequest(shardRoutingTable, clusterBlock, new NotifyOnceListener<>() { + sendVerifyShardBlockRequest(shardRoutingTable, clusterBlock, ActionListener.notifyOnce(new ActionListener<>() { @Override - public void innerOnResponse(final ReplicationResponse replicationResponse) { + public void onResponse(ReplicationResponse replicationResponse) { AddBlockShardResult.Failure[] failures = Arrays.stream(replicationResponse.getShardInfo().getFailures()) .map(f -> new AddBlockShardResult.Failure(f.index(), f.shardId(), f.getCause(), f.nodeId())) .toArray(AddBlockShardResult.Failure[]::new); @@ -760,7 +761,7 @@ public void innerOnResponse(final ReplicationResponse replicationResponse) { } @Override - public void innerOnFailure(final Exception e) { + public void onFailure(Exception e) { AddBlockShardResult.Failure failure = new AddBlockShardResult.Failure(index.getName(), shardId, e); results.setOnce(shardId, new AddBlockShardResult(shardId, new AddBlockShardResult.Failure[] { failure })); processIfFinished(); @@ -773,7 +774,7 @@ private void processIfFinished() { onResponse.accept(result); } } - }); + })); } } @@ -809,11 +810,12 @@ private void sendVerifyShardBlockRequest( static Tuple> closeRoutingTable( final ClusterState currentState, final Map blockedIndices, - final Map verifyResult + final Map verifyResult, + ShardRoutingRoleStrategy shardRoutingRoleStrategy ) { final Metadata.Builder metadata = Metadata.builder(currentState.metadata()); final ClusterBlocks.Builder blocks = ClusterBlocks.builder(currentState.blocks()); - final RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable()); + final RoutingTable.Builder routingTable = RoutingTable.builder(shardRoutingRoleStrategy, currentState.routingTable()); final Set closedIndices = new HashSet<>(); Map closingResults = new HashMap<>(verifyResult); @@ -1151,7 +1153,10 @@ private ClusterState openIndices(final Index[] indices, final ClusterState curre ClusterState updatedState = ClusterState.builder(currentState).metadata(metadata).blocks(blocks).build(); - final RoutingTable.Builder routingTable = 
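// Editor's note: the two MetadataIndexStateService hunks above swap the removed NotifyOnceListener
// subclass for ActionListener.notifyOnce(...), which wraps a listener so that only the first
// onResponse/onFailure call is delivered. A minimal sketch of the call shape, not part of this
// patch (the response handling is trimmed and the wrapped listener body is illustrative):
ActionListener<ReplicationResponse> once = ActionListener.notifyOnce(new ActionListener<>() {
    @Override
    public void onResponse(ReplicationResponse response) {
        // invoked at most once, even if the transport layer notifies twice
    }

    @Override
    public void onFailure(Exception e) {
        // likewise delivered at most once
    }
});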
RoutingTable.builder(updatedState.routingTable()); + final RoutingTable.Builder routingTable = RoutingTable.builder( + allocationService.getShardRoutingRoleStrategy(), + updatedState.routingTable() + ); for (IndexMetadata previousIndexMetadata : indicesToOpen) { if (previousIndexMetadata.getState() != IndexMetadata.State.OPEN) { routingTable.addAsFromCloseToOpen(updatedState.metadata().getIndexSafe(previousIndexMetadata.getIndex())); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index 2e5cdab353da..3dca87dcbde4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -38,7 +38,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.index.IndexSettingProviders; @@ -309,14 +308,7 @@ public ClusterState addComponentTemplate( final String composableTemplateName = entry.getKey(); final ComposableIndexTemplate composableTemplate = entry.getValue(); try { - validateCompositeTemplate( - tempStateWithComponentTemplateAdded, - composableTemplateName, - composableTemplate, - indicesService, - xContentRegistry, - systemIndices - ); + validateIndexTemplateV2(composableTemplateName, composableTemplate, tempStateWithComponentTemplateAdded); } catch (Exception e) { if (validationFailure == null) { validationFailure = new IllegalArgumentException( @@ -513,17 +505,34 @@ public static void validateV2TemplateRequest(Metadata metadata, String name, Com } final Map componentTemplates = metadata.componentTemplates(); + final List ignoreMissingComponentTemplates = (template.getIgnoreMissingComponentTemplates() == null + ? 
List.of() + : template.getIgnoreMissingComponentTemplates()); final List missingComponentTemplates = template.composedOf() .stream() .filter(componentTemplate -> componentTemplates.containsKey(componentTemplate) == false) + .filter(componentTemplate -> ignoreMissingComponentTemplates.contains(componentTemplate) == false) .toList(); - if (missingComponentTemplates.size() > 0) { + if (missingComponentTemplates.size() > 0 && ignoreMissingComponentTemplates.size() == 0) { throw new InvalidIndexTemplateException( name, "index template [" + name + "] specifies component templates " + missingComponentTemplates + " that do not exist" ); } + + if (missingComponentTemplates.size() > 0 && ignoreMissingComponentTemplates.size() > 0) { + + throw new InvalidIndexTemplateException( + name, + "index template [" + + name + + "] specifies a missing component templates " + + missingComponentTemplates + + " " + + "that does not exist and is not part of 'ignore_missing_component_templates'" + ); + } } public ClusterState addIndexTemplateV2( @@ -587,7 +596,8 @@ public ClusterState addIndexTemplateV2( template.version(), template.metadata(), template.getDataStreamTemplate(), - template.getAllowAutoCreate() + template.getAllowAutoCreate(), + template.getIgnoreMissingComponentTemplates() ); } @@ -687,12 +697,12 @@ private void validateIndexTemplateV2(String name, ComposableIndexTemplate indexT indexTemplate.version(), indexTemplate.metadata(), indexTemplate.getDataStreamTemplate(), - indexTemplate.getAllowAutoCreate() + indexTemplate.getAllowAutoCreate(), + indexTemplate.getIgnoreMissingComponentTemplates() ); validate(name, templateToValidate); validateDataStreamsStillReferenced(currentState, name, templateToValidate); - validateTsdbDataStreamsReferringTsdbTemplate(currentState, name, templateToValidate); // Finally, right before adding the template, we need to ensure that the composite settings, // mappings, and aliases are valid after it's been composed with the component templates @@ -765,56 +775,6 @@ private static void validateDataStreamsStillReferenced(ClusterState state, Strin } } - // This method should be invoked after validateDataStreamsStillReferenced(...) 
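// Editor's note: the validateV2TemplateRequest hunk above adds support for
// ignore_missing_component_templates: a referenced component template only fails validation if it
// is both absent from the cluster and not listed as ignorable. The set arithmetic, restated with
// plain collections as a self-contained sketch (names are illustrative; the real check lives in
// MetadataIndexTemplateService as shown above):
import java.util.List;
import java.util.Set;

public final class MissingComponentTemplateCheck {

    public static List<String> findViolations(List<String> composedOf, Set<String> existing, List<String> ignoreMissing) {
        return composedOf.stream()
            .filter(name -> existing.contains(name) == false)      // referenced but not defined
            .filter(name -> ignoreMissing.contains(name) == false) // and not explicitly tolerated
            .toList();
    }

    public static void main(String[] args) {
        // "logs@custom" is tolerated, so only "metrics@custom" is reported as a violation.
        System.out.println(findViolations(
            List.of("logs@settings", "logs@custom", "metrics@custom"),
            Set.of("logs@settings"),
            List.of("logs@custom")
        ));
    }
}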
- private static void validateTsdbDataStreamsReferringTsdbTemplate( - ClusterState state, - String templateName, - ComposableIndexTemplate newTemplate - ) { - Metadata currentMetadata = state.getMetadata(); - Metadata updatedMetadata = null; - Set dataStreamsWithNonTsdbTemplate = null; - - for (var dataStream : state.metadata().dataStreams().values()) { - if (dataStream.getIndexMode() != IndexMode.TIME_SERIES) { - continue; - } - - if (updatedMetadata == null) { - updatedMetadata = Metadata.builder(state.metadata()).put(templateName, newTemplate).build(); - } - var matchingTemplate = findV2Template(updatedMetadata, dataStream.getName(), false); - if (templateName.equals(matchingTemplate)) { - if (currentMetadata.isTimeSeriesTemplate(newTemplate) == false) { - if (dataStreamsWithNonTsdbTemplate == null) { - dataStreamsWithNonTsdbTemplate = new HashSet<>(); - } - dataStreamsWithNonTsdbTemplate.add(dataStream.getName()); - } - } - } - - if (dataStreamsWithNonTsdbTemplate != null) { - var settings = MetadataIndexTemplateService.resolveSettings(newTemplate, currentMetadata.componentTemplates()); - var routingPaths = IndexMetadata.INDEX_ROUTING_PATH.get(settings); - throw new IllegalArgumentException( - "composable template [" - + templateName - + "] with index patterns " - + newTemplate.indexPatterns() - + ", priority [" - + newTemplate.priority() - + "]" - + ", index.routing_path " - + routingPaths - + " " - + "would cause tsdb data streams " - + dataStreamsWithNonTsdbTemplate - + " to no longer match a data stream template with a time_series index_mode" - ); - } - } - /** * Return a map of v1 template names to their index patterns for v1 templates that would overlap * with the given v2 template's index patterns. diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java index 0c138087e6e5..a85471d04ad7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -212,7 +212,10 @@ ClusterState execute(ClusterState currentState) { * * TODO: should we update the in-sync allocation IDs once the data is deleted by the node? 
*/ - routingTableBuilder = RoutingTable.builder(currentState.routingTable()); + routingTableBuilder = RoutingTable.builder( + allocationService.getShardRoutingRoleStrategy(), + currentState.routingTable() + ); routingTableBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices); metadataBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices); logger.info("updating number_of_replicas to [{}] for indices {}", updatedNumberOfReplicas, actualIndices); @@ -302,6 +305,11 @@ ClusterState execute(ClusterState currentState) { return updatedState; } + + @Override + public String toString() { + return request.toString(); + } } public void updateSettings(final UpdateSettingsClusterStateUpdateRequest request, final ActionListener listener) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java index ab1f1a6812f5..2efbc1c372aa 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java @@ -8,7 +8,7 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.DiffableUtils; @@ -16,14 +16,16 @@ import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.EnumSet; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -39,7 +41,7 @@ */ public class NodesShutdownMetadata implements Metadata.Custom { public static final String TYPE = "node_shutdown"; - public static final Version NODE_SHUTDOWN_VERSION = Version.V_7_13_0; + public static final TransportVersion NODE_SHUTDOWN_VERSION = TransportVersion.V_7_13_0; public static final NodesShutdownMetadata EMPTY = new NodesShutdownMetadata(Map.of()); private static final ParseField NODES_FIELD = new ParseField("nodes"); @@ -149,7 +151,7 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { + public TransportVersion getMinimalSupportedVersion() { return NODE_SHUTDOWN_VERSION; } @@ -167,9 +169,8 @@ public int hashCode() { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(NODES_FIELD.getPreferredName(), nodes); - return builder; + public Iterator toXContentChunked(ToXContent.Params ignored) { + return ChunkedToXContentHelper.xContentValuesMap(NODES_FIELD.getPreferredName(), nodes); } /** @@ -213,7 +214,7 @@ static Diff readNodesDiffFrom(StreamInput in) throws } @Override - public Version getMinimalSupportedVersion() { + public TransportVersion getMinimalSupportedVersion() { return NODE_SHUTDOWN_VERSION; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetadata.java 
b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetadata.java index 720e57f7a194..69dc3f41ea71 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetadata.java @@ -9,7 +9,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.AbstractNamedDiffable; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.Metadata.Custom; @@ -27,6 +27,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.EnumSet; +import java.util.Iterator; import java.util.List; import java.util.function.UnaryOperator; @@ -166,8 +167,8 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT.minimumCompatibilityVersion(); + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.CURRENT.minimumCompatibilityVersion(); } public RepositoriesMetadata(StreamInput in) throws IOException { @@ -250,15 +251,11 @@ public static RepositoriesMetadata fromXContent(XContentParser parser) throws IO return new RepositoriesMetadata(repository); } - /** - * {@inheritDoc} - */ @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - for (RepositoryMetadata repository : repositories) { - toXContent(repository, builder, params); - } - return builder; + public Iterator toXContentChunked(ToXContent.Params ignored) { + return repositories.stream() + .map(repository -> (ToXContent) (builder, params) -> toXContent(repository, builder, params)) + .iterator(); } @Override @@ -273,7 +270,8 @@ public EnumSet context() { * @param builder XContent builder * @param params serialization parameters */ - public static void toXContent(RepositoryMetadata repository, XContentBuilder builder, ToXContent.Params params) throws IOException { + public static XContentBuilder toXContent(RepositoryMetadata repository, XContentBuilder builder, ToXContent.Params params) + throws IOException { builder.startObject(repository.name()); builder.field("type", repository.type()); if (repository.uuid().equals(RepositoryData.MISSING_UUID) == false) { @@ -288,6 +286,7 @@ public static void toXContent(RepositoryMetadata repository, XContentBuilder bui builder.field("pending_generation", repository.pendingGeneration()); } builder.endObject(); + return builder; } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetadata.java index 7790e265220a..66e339a474ed 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetadata.java @@ -129,7 +129,7 @@ public long pendingGeneration() { public RepositoryMetadata(StreamInput in) throws IOException { name = in.readString(); - if (in.getVersion().onOrAfter(SnapshotsService.UUIDS_IN_REPO_DATA_VERSION)) { + if (in.getTransportVersion().onOrAfter(SnapshotsService.UUIDS_IN_REPO_DATA_VERSION.transportVersion)) { uuid = in.readString(); } else { uuid = RepositoryData.MISSING_UUID; @@ -148,7 +148,7 @@ public RepositoryMetadata(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws 
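// Editor's note: NodesShutdownMetadata and RepositoriesMetadata above move from a single
// toXContent(...) implementation to toXContentChunked(...), which returns an Iterator of small
// ToXContent chunks so large cluster-state customs can be rendered incrementally. A sketch of the
// list-backed variant used by RepositoriesMetadata, not part of this patch: WidgetMetadata/widgets
// are illustrative stand-ins, and ChunkedToXContentHelper covers the map-backed case as in
// NodesShutdownMetadata.
@Override
public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignored) {
    // One lightweight chunk per element; nothing is serialized until the iterator is consumed.
    return widgets.stream()
        .map(widget -> (ToXContent) (builder, params) -> widget.toXContent(builder, params))
        .iterator();
}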
IOException { out.writeString(name); - if (out.getVersion().onOrAfter(SnapshotsService.UUIDS_IN_REPO_DATA_VERSION)) { + if (out.getTransportVersion().onOrAfter(SnapshotsService.UUIDS_IN_REPO_DATA_VERSION.transportVersion)) { out.writeString(uuid); } out.writeString(type); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java index dbde71aeef67..7f7f8a87cbfb 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java @@ -8,7 +8,7 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -22,7 +22,7 @@ import java.util.Objects; public class ShutdownShardMigrationStatus implements Writeable, ToXContentObject { - private static final Version ALLOCATION_DECISION_ADDED_VERSION = Version.V_7_16_0; + private static final TransportVersion ALLOCATION_DECISION_ADDED_VERSION = TransportVersion.V_7_16_0; public static final String NODE_ALLOCATION_DECISION_KEY = "node_allocation_decision"; @@ -57,7 +57,7 @@ public ShutdownShardMigrationStatus(StreamInput in) throws IOException { this.status = in.readEnum(SingleNodeShutdownMetadata.Status.class); this.shardsRemaining = in.readLong(); this.explanation = in.readOptionalString(); - if (in.getVersion().onOrAfter(ALLOCATION_DECISION_ADDED_VERSION)) { + if (in.getTransportVersion().onOrAfter(ALLOCATION_DECISION_ADDED_VERSION)) { this.allocationDecision = in.readOptionalWriteable(ShardAllocationDecision::new); } else { this.allocationDecision = null; @@ -100,7 +100,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeEnum(status); out.writeLong(shardsRemaining); out.writeOptionalString(explanation); - if (out.getVersion().onOrAfter(ALLOCATION_DECISION_ADDED_VERSION)) { + if (out.getTransportVersion().onOrAfter(ALLOCATION_DECISION_ADDED_VERSION)) { out.writeOptionalWriteable(allocationDecision); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java index 489389b24769..69500c2e9b90 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java @@ -8,7 +8,7 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -33,7 +33,7 @@ */ public class SingleNodeShutdownMetadata implements SimpleDiffable, ToXContentObject { - public static final Version REPLACE_SHUTDOWN_TYPE_ADDED_VERSION = Version.V_7_16_0; + public static final TransportVersion REPLACE_SHUTDOWN_TYPE_ADDED_VERSION = TransportVersion.V_7_16_0; public static final ParseField NODE_ID_FIELD = new ParseField("node_id"); public static final ParseField TYPE_FIELD = new ParseField("type"); @@ -133,7 +133,7 @@ public SingleNodeShutdownMetadata(StreamInput in) throws IOException { this.startedAtMillis 
= in.readVLong(); this.nodeSeen = in.readBoolean(); this.allocationDelay = in.readOptionalTimeValue(); - if (in.getVersion().onOrAfter(REPLACE_SHUTDOWN_TYPE_ADDED_VERSION)) { + if (in.getTransportVersion().onOrAfter(REPLACE_SHUTDOWN_TYPE_ADDED_VERSION)) { this.targetNodeName = in.readOptionalString(); } else { this.targetNodeName = null; @@ -199,7 +199,7 @@ public TimeValue getAllocationDelay() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(nodeId); - if (out.getVersion().before(REPLACE_SHUTDOWN_TYPE_ADDED_VERSION) && this.type == SingleNodeShutdownMetadata.Type.REPLACE) { + if (out.getTransportVersion().before(REPLACE_SHUTDOWN_TYPE_ADDED_VERSION) && this.type == SingleNodeShutdownMetadata.Type.REPLACE) { out.writeEnum(SingleNodeShutdownMetadata.Type.REMOVE); } else { out.writeEnum(type); @@ -208,7 +208,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(startedAtMillis); out.writeBoolean(nodeSeen); out.writeOptionalTimeValue(allocationDelay); - if (out.getVersion().onOrAfter(REPLACE_SHUTDOWN_TYPE_ADDED_VERSION)) { + if (out.getTransportVersion().onOrAfter(REPLACE_SHUTDOWN_TYPE_ADDED_VERSION)) { out.writeOptionalString(targetNodeName); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index 73cf25fd3e8e..4db38a81c5f3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -8,6 +8,7 @@ package org.elasticsearch.cluster.node; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; @@ -63,7 +64,7 @@ public static boolean isStateless(final Settings settings) { } static final String COORDINATING_ONLY = "coordinating_only"; - public static final Version EXTERNAL_ID_VERSION = Version.V_8_3_0; + public static final TransportVersion EXTERNAL_ID_VERSION = TransportVersion.V_8_3_0; public static final Comparator DISCOVERY_NODE_COMPARATOR = Comparator.comparing(DiscoveryNode::getName) .thenComparing(DiscoveryNode::getId); @@ -138,7 +139,6 @@ public static boolean isDedicatedFrozenNode(final Settings settings) { private final Map attributes; private final Version version; private final SortedSet roles; - private final Set roleNames; private final String externalId; @@ -412,7 +412,7 @@ public DiscoveryNode(StreamInput in) throws IOException { } this.roles = Collections.unmodifiableSortedSet(roles); this.version = Version.readVersion(in); - if (in.getVersion().onOrAfter(EXTERNAL_ID_VERSION)) { + if (in.getTransportVersion().onOrAfter(EXTERNAL_ID_VERSION)) { this.externalId = readStringLiteral.read(in); } else { this.externalId = nodeName; @@ -445,7 +445,7 @@ public void writeTo(StreamOutput out) throws IOException { o.writeBoolean(role.canContainData()); }); Version.writeVersion(version, out); - if (out.getVersion().onOrAfter(EXTERNAL_ID_VERSION)) { + if (out.getTransportVersion().onOrAfter(EXTERNAL_ID_VERSION)) { out.writeString(externalId); } } @@ -605,6 +605,7 @@ public void appendDescriptionWithoutAttributes(StringBuilder stringBuilder) { roles.stream().map(DiscoveryNodeRole::roleNameAbbreviation).sorted().forEach(stringBuilder::append); stringBuilder.append('}'); } + stringBuilder.append('{').append(version).append('}'); } public String descriptionWithoutAttributes() { @@ -620,19 +621,13 @@ public XContentBuilder 
toXContent(XContentBuilder builder, Params params) throws builder.field("ephemeral_id", getEphemeralId()); builder.field("transport_address", getAddress().toString()); builder.field("external_id", getExternalId()); - - builder.startObject("attributes"); - for (Map.Entry entry : attributes.entrySet()) { - builder.field(entry.getKey(), entry.getValue()); - } - builder.endObject(); - + builder.stringStringMap("attributes", attributes); builder.startArray("roles"); for (DiscoveryNodeRole role : roles) { builder.value(role.roleName()); } builder.endArray(); - + builder.field("version", version); builder.endObject(); return builder; } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java index 11440ffeca90..ceaa696f6e55 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java @@ -36,6 +36,7 @@ import java.util.Set; import java.util.function.IntConsumer; import java.util.function.IntSupplier; +import java.util.function.Predicate; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; @@ -233,7 +234,7 @@ public void collectSearchShards(String routing, IntConsumer consumer) { } public static class ExtractFromSource extends IndexRouting { - private final List routingPaths; + private final Predicate isRoutingPath; private final XContentParserConfiguration parserConfig; ExtractFromSource(IndexMetadata metadata) { @@ -241,7 +242,8 @@ public static class ExtractFromSource extends IndexRouting { if (metadata.isRoutingPartitionedIndex()) { throw new IllegalArgumentException("routing_partition_size is incompatible with routing_path"); } - this.routingPaths = metadata.getRoutingPaths(); + List routingPaths = metadata.getRoutingPaths(); + isRoutingPath = Regex.simpleMatcher(routingPaths.toArray(String[]::new)); this.parserConfig = XContentParserConfiguration.EMPTY.withFiltering(Set.copyOf(routingPaths), null, true); } @@ -262,7 +264,7 @@ public String createId(XContentType sourceType, BytesReference source, byte[] su public String createId(Map flat, byte[] suffix) { Builder b = builder(); for (Map.Entry e : flat.entrySet()) { - if (Regex.simpleMatch(routingPaths, e.getKey())) { + if (isRoutingPath.test(e.getKey())) { b.hashes.add(new NameAndHash(new BytesRef(e.getKey()), hash(new BytesRef(e.getValue().toString())))); } } @@ -299,7 +301,7 @@ public class Builder { private final List hashes = new ArrayList<>(); public void addMatching(String fieldName, BytesRef string) { - if (Regex.simpleMatch(routingPaths, fieldName)) { + if (isRoutingPath.test(fieldName)) { hashes.add(new NameAndHash(new BytesRef(fieldName), hash(string))); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index 8924a47b03bd..0c62dce1b220 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -32,6 +32,7 @@ import java.util.List; import java.util.Set; import java.util.function.Predicate; +import java.util.stream.Stream; /** * The {@link IndexRoutingTable} represents routing information for a single @@ -50,11 +51,15 @@ */ public class IndexRoutingTable implements SimpleDiffable { - private static final List> PRIORITY_REMOVE_CLAUSES = List.of( - 
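// Editor's note: the IndexRouting.ExtractFromSource hunk above precompiles the routing_path
// patterns into a single Predicate<String> via Regex.simpleMatcher(...) instead of re-running
// Regex.simpleMatch(list, field) for every field of every indexed document. A fragment sketching
// the shape, not part of this patch (the routingPaths values are illustrative; both Regex methods
// are the ones used in the hunk above):
import java.util.List;
import java.util.function.Predicate;

import org.elasticsearch.common.regex.Regex;

List<String> routingPaths = List.of("dim.*", "host.name");

// once, when the index metadata is constructed:
Predicate<String> isRoutingPath = Regex.simpleMatcher(routingPaths.toArray(String[]::new));

// per document field:
boolean routed = isRoutingPath.test("dim.zone"); // true for any field matching a routing_path pattern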
ShardRouting::unassigned, - ShardRouting::initializing, + private static final List> PRIORITY_REMOVE_CLAUSES = Stream.>of( + shardRouting -> shardRouting.isPromotableToPrimary() == false, shardRouting -> true - ); + ) + .flatMap( + p1 -> Stream.>of(ShardRouting::unassigned, ShardRouting::initializing, shardRouting -> true) + .map(p1::and) + ) + .toList(); private final Index index; private final ShardShuffler shuffler; @@ -131,7 +136,9 @@ boolean validate(Metadata metadata) { ); } final Set inSyncAllocationIds = indexMetadata.inSyncAllocationIds(shardRouting.id()); - if (shardRouting.active() && inSyncAllocationIds.contains(shardRouting.allocationId().getId()) == false) { + if (shardRouting.active() + && shardRouting.isPromotableToPrimary() + && inSyncAllocationIds.contains(shardRouting.allocationId().getId()) == false) { throw new IllegalStateException( "active shard routing " + shardRouting @@ -302,7 +309,7 @@ public int hashCode() { public static IndexRoutingTable readFrom(StreamInput in) throws IOException { Index index = new Index(in); - Builder builder = new Builder(index); + Builder builder = new Builder(ShardRoutingRoleStrategy.NO_SHARD_CREATION, index); int size = in.readVInt(); builder.ensureShardArray(size); @@ -324,15 +331,21 @@ public void writeTo(StreamOutput out) throws IOException { } public static Builder builder(Index index) { - return new Builder(index); + return new Builder(ShardRoutingRoleStrategy.NO_SHARD_CREATION, index); + } + + public static Builder builder(ShardRoutingRoleStrategy shardRoutingRoleStrategy, Index index) { + return new Builder(shardRoutingRoleStrategy, index); } public static class Builder { + private final ShardRoutingRoleStrategy shardRoutingRoleStrategy; private final Index index; private IndexShardRoutingTable.Builder[] shards; - public Builder(Index index) { + public Builder(ShardRoutingRoleStrategy shardRoutingRoleStrategy, Index index) { + this.shardRoutingRoleStrategy = shardRoutingRoleStrategy; this.index = index; } @@ -438,7 +451,8 @@ private Builder initializeAsRestore( shardId, primary, primary ? EmptyStoreRecoverySource.INSTANCE : PeerRecoverySource.INSTANCE, - unassignedInfo + unassignedInfo, + shardRoutingRoleStrategy.newRestoredRole(i) ) ); } else { @@ -447,7 +461,8 @@ private Builder initializeAsRestore( shardId, primary, primary ? recoverySource : PeerRecoverySource.INSTANCE, - withLastAllocatedNodeId(unassignedInfo, previousNodes, i) + withLastAllocatedNodeId(unassignedInfo, previousNodes, i), + shardRoutingRoleStrategy.newRestoredRole(i) ) ); } @@ -493,7 +508,8 @@ private Builder initializeEmpty( shardId, primary, primary ? 
primaryRecoverySource : PeerRecoverySource.INSTANCE, - withLastAllocatedNodeId(unassignedInfo, previousNodes, i) + withLastAllocatedNodeId(unassignedInfo, previousNodes, i), + shardRoutingRoleStrategy.newEmptyRole(i) ) ); } @@ -546,7 +562,7 @@ private static UnassignedInfo withLastAllocatedNodeId(UnassignedInfo unassignedI ); } - public Builder addReplica() { + public Builder addReplica(ShardRouting.Role role) { assert shards != null; for (IndexShardRoutingTable.Builder existing : shards) { assert existing != null; @@ -556,7 +572,8 @@ public Builder addReplica() { existing.shardId(), false, PeerRecoverySource.INSTANCE, - new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null) + new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null), + role ) ); } @@ -580,16 +597,12 @@ public Builder removeReplica() { builder.addShard(shardRouting); } - boolean removed = false; - for (Predicate removeClause : PRIORITY_REMOVE_CLAUSES) { - if (removed == false) { - for (int copy = 0; copy < indexShard.size(); copy++) { - ShardRouting shardRouting = indexShard.shard(copy); - if (shardRouting.primary() == false && removeClause.test(shardRouting)) { - builder.removeShard(shardRouting); - removed = true; - break; - } + findAndRemove: for (Predicate removeClause : PRIORITY_REMOVE_CLAUSES) { + for (int copy = 0; copy < indexShard.size(); copy++) { + ShardRouting shardRouting = indexShard.shard(copy); + if (shardRouting.primary() == false && removeClause.test(shardRouting)) { + builder.removeShard(shardRouting); + break findAndRemove; } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index 7cb809fc783e..3a5a369caa3f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -45,23 +45,23 @@ public class IndexShardRoutingTable { final ShardShuffler shuffler; final ShardId shardId; - + final ShardRouting[] shards; final ShardRouting primary; final List replicas; - final ShardRouting[] shards; final List activeShards; final List assignedShards; - final boolean allShardsStarted; - /** * The initializing list, including ones that are initializing on a target node because of relocation. * If we can come up with a better variable name, it would be nice... 
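// Editor's note: the IndexRoutingTable.Builder hunks above thread a ShardRoutingRoleStrategy
// through every shard-creation path (newEmptyRole(copyIndex), newRestoredRole(copyIndex), and
// addReplica(ShardRouting.Role)), with ShardRoutingRoleStrategy.NO_SHARD_CREATION as the
// placeholder for paths such as wire deserialization that never create shards; the RoutingTable
// hunks further below add the matching newReplicaRole() call. A sketch of a strategy that keeps
// copy 0 promotable and marks the other copies search-only, not part of this patch: the Role
// constant names DEFAULT and SEARCH_ONLY are assumptions for illustration, and the interface is
// assumed to expose exactly the factory methods used in these hunks.
public class PromotableFirstCopyRoleStrategy implements ShardRoutingRoleStrategy {

    @Override
    public ShardRouting.Role newEmptyRole(int copyIndex) {
        return copyIndex == 0 ? ShardRouting.Role.DEFAULT : ShardRouting.Role.SEARCH_ONLY;
    }

    @Override
    public ShardRouting.Role newRestoredRole(int copyIndex) {
        return newEmptyRole(copyIndex);
    }

    @Override
    public ShardRouting.Role newReplicaRole() {
        return ShardRouting.Role.SEARCH_ONLY;
    }
}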
*/ final List allInitializingShards; + final boolean allShardsStarted; + final int activeSearchShardCount; + final int totalSearchShardCount; IndexShardRoutingTable(ShardId shardId, List shards) { - this.shardId = shardId; this.shuffler = new RotationShardShuffler(Randomness.get().nextInt()); + this.shardId = shardId; this.shards = shards.toArray(ShardRouting[]::new); ShardRouting primary = null; @@ -70,6 +70,8 @@ public class IndexShardRoutingTable { List assignedShards = new ArrayList<>(); List allInitializingShards = new ArrayList<>(); boolean allShardsStarted = true; + int activeSearchShardCount = 0; + int totalSearchShardCount = 0; for (ShardRouting shard : this.shards) { if (shard.primary()) { assert primary == null : "duplicate primary: " + primary + " vs " + shard; @@ -79,6 +81,12 @@ public class IndexShardRoutingTable { } if (shard.active()) { activeShards.add(shard); + if (shard.role().isSearchable()) { + activeSearchShardCount++; + } + } + if (shard.role().isSearchable()) { + totalSearchShardCount++; } if (shard.initializing()) { allInitializingShards.add(shard); @@ -97,12 +105,14 @@ public class IndexShardRoutingTable { allShardsStarted = false; } } - this.allShardsStarted = allShardsStarted; this.primary = primary; this.replicas = CollectionUtils.wrapUnmodifiableOrEmptySingleton(replicas); this.activeShards = CollectionUtils.wrapUnmodifiableOrEmptySingleton(activeShards); this.assignedShards = CollectionUtils.wrapUnmodifiableOrEmptySingleton(assignedShards); this.allInitializingShards = CollectionUtils.wrapUnmodifiableOrEmptySingleton(allInitializingShards); + this.allShardsStarted = allShardsStarted; + this.activeSearchShardCount = activeSearchShardCount; + this.totalSearchShardCount = totalSearchShardCount; } /** @@ -461,6 +471,24 @@ public boolean allShardsStarted() { return allShardsStarted; } + /** + * @return the count of active searchable shards + */ + public int getActiveSearchShardCount() { + return activeSearchShardCount; + } + + /** + * @return the total count of searchable shards + */ + public int getTotalSearchShardCount() { + return totalSearchShardCount; + } + + public boolean hasSearchShards() { + return totalSearchShardCount > 0; + } + @Nullable public ShardRouting getByAllocationId(String allocationId) { for (ShardRouting shardRouting : assignedShards()) { @@ -471,15 +499,17 @@ public ShardRouting getByAllocationId(String allocationId) { return null; } - public Set getAllAllocationIds() { + public Set getPromotableAllocationIds() { assert MasterService.assertNotMasterUpdateThread("not using this on the master thread so we don't have to pre-compute this"); Set allAllocationIds = new HashSet<>(); for (ShardRouting shard : shards) { - if (shard.relocating()) { - allAllocationIds.add(shard.getTargetRelocatingShard().allocationId().getId()); - } - if (shard.assignedToNode()) { - allAllocationIds.add(shard.allocationId().getId()); + if (shard.isPromotableToPrimary()) { + if (shard.relocating()) { + allAllocationIds.add(shard.getTargetRelocatingShard().allocationId().getId()); + } + if (shard.assignedToNode()) { + allAllocationIds.add(shard.allocationId().getId()); + } } } return allAllocationIds; @@ -559,6 +589,7 @@ public IndexShardRoutingTable build() { // don't allow more than one shard copy with same id to be allocated to same node assert distinctNodes(shards) : "more than one shard with same id assigned to same node (shards: " + shards + ")"; assert noDuplicatePrimary(shards) : "expected but did not find unique primary in shard routing table: " + shards; + 
assert noAssignedReplicaWithoutActivePrimary(shards) : "unexpected assigned replica with no active primary: " + shards; return new IndexShardRoutingTable(shardId, shards); } @@ -589,9 +620,24 @@ static boolean noDuplicatePrimary(List shards) { seenPrimary = true; } } - // We should be able to return seenPrimary here, but in tests there are many routing tables with no primary (e.g. empty) so for - // now we leniently allow there to be no primary as well. TODO fix those tests and stop being lenient here. - return true; + return seenPrimary; + } + + static boolean noAssignedReplicaWithoutActivePrimary(List shards) { + boolean seenAssignedReplica = false; + for (final var shard : shards) { + if (shard.currentNodeId() != null) { + if (shard.primary()) { + if (shard.active()) { + return true; + } + } else { + seenAssignedReplica = true; + } + } + } + + return seenAssignedReplica == false; } public static IndexShardRoutingTable.Builder readFrom(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java index af6b27fef98f..53671458e0cf 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java @@ -20,11 +20,11 @@ import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedHashSet; -import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.function.Predicate; +import java.util.stream.Stream; /** * A {@link RoutingNode} represents a cluster node associated with a single {@link DiscoveryNode} including all shards @@ -209,55 +209,6 @@ void remove(ShardRouting shard) { assert invariant(); } - /** - * Determine the number of shards with a specific state - * @param states set of states which should be counted - * @return number of shards - */ - public int numberOfShardsWithState(ShardRoutingState... 
states) { - if (states.length == 1) { - if (states[0] == ShardRoutingState.INITIALIZING) { - return initializingShards.size(); - } else if (states[0] == ShardRoutingState.RELOCATING) { - return relocatingShards.size(); - } else if (states[0] == ShardRoutingState.STARTED) { - return startedShards.size(); - } - } - - int count = 0; - for (ShardRouting shardEntry : this) { - for (ShardRoutingState state : states) { - if (shardEntry.state() == state) { - count++; - } - } - } - return count; - } - - /** - * Determine the shards with a specific state - * @param state state which should be listed - * @return List of shards - */ - public List shardsWithState(ShardRoutingState state) { - if (state == ShardRoutingState.INITIALIZING) { - return new ArrayList<>(initializingShards); - } else if (state == ShardRoutingState.RELOCATING) { - return new ArrayList<>(relocatingShards); - } else if (state == ShardRoutingState.STARTED) { - return new ArrayList<>(startedShards); - } - List shards = new ArrayList<>(); - for (ShardRouting shardEntry : this) { - if (shardEntry.state() == state) { - shards.add(shardEntry); - } - } - return shards; - } - private static final ShardRouting[] EMPTY_SHARD_ROUTING_ARRAY = new ShardRouting[0]; public ShardRouting[] initializing() { @@ -272,54 +223,45 @@ public ShardRouting[] started() { return startedShards.toArray(EMPTY_SHARD_ROUTING_ARRAY); } + /** + * Determine the number of shards with a specific state + * @param state which should be counted + * @return number of shards + */ + public int numberOfShardsWithState(ShardRoutingState state) { + return internalGetShardsWithState(state).size(); + } + + /** + * Determine the shards with a specific state + * @param state state which should be listed + * @return List of shards + */ + public Stream shardsWithState(ShardRoutingState state) { + return internalGetShardsWithState(state).stream(); + } + /** * Determine the shards of an index with a specific state * @param index id of the index * @param states set of states which should be listed * @return a list of shards */ - public List shardsWithState(String index, ShardRoutingState... states) { - List shards = new ArrayList<>(); - - if (states.length == 1) { - if (states[0] == ShardRoutingState.INITIALIZING) { - for (ShardRouting shardEntry : initializingShards) { - if (shardEntry.getIndexName().equals(index) == false) { - continue; - } - shards.add(shardEntry); - } - return shards; - } else if (states[0] == ShardRoutingState.RELOCATING) { - for (ShardRouting shardEntry : relocatingShards) { - if (shardEntry.getIndexName().equals(index) == false) { - continue; - } - shards.add(shardEntry); - } - return shards; - } else if (states[0] == ShardRoutingState.STARTED) { - for (ShardRouting shardEntry : startedShards) { - if (shardEntry.getIndexName().equals(index) == false) { - continue; - } - shards.add(shardEntry); - } - return shards; - } - } + public Stream shardsWithState(String index, ShardRoutingState... 
states) { + return Stream.of(states).flatMap(state -> shardsWithState(index, state)); + } - for (ShardRouting shardEntry : this) { - if (shardEntry.getIndexName().equals(index) == false) { - continue; - } - for (ShardRoutingState state : states) { - if (shardEntry.state() == state) { - shards.add(shardEntry); - } - } - } - return shards; + public Stream shardsWithState(String index, ShardRoutingState state) { + return shardsWithState(state).filter(shardRouting -> Objects.equals(shardRouting.getIndexName(), index)); + } + + private LinkedHashSet internalGetShardsWithState(ShardRoutingState state) { + return switch (state) { + case UNASSIGNED -> throw new IllegalArgumentException("Unassigned shards are not linked to a routing node"); + case INITIALIZING -> initializingShards; + case STARTED -> startedShards; + case RELOCATING -> relocatingShards; + }; } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index c5e90c2a0c11..5dda2a885a0b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -345,14 +345,12 @@ public ShardRouting activePrimary(ShardId shardId) { } /** - * Returns one active replica shard for the given shard id or null if - * no active replica is found. - * - * Since replicas could possibly be on nodes with a older version of ES than - * the primary is, this will return replicas on the highest version of ES. + * Returns one active and promotable replica shard for the given shard id or null if no active replica is found. * + * Since replicas could possibly be on nodes with a older version of ES than the primary is, this will return replicas on the highest + * version of ES. 
*/ - public ShardRouting activeReplicaWithHighestVersion(ShardId shardId) { + public ShardRouting activePromotableReplicaWithHighestVersion(ShardId shardId) { // It's possible for replicaNodeVersion to be null, when disassociating dead nodes // that have been removed, the shards are failed, and part of the shard failing // calls this method with an out-of-date RoutingNodes, where the version might not @@ -361,6 +359,7 @@ public ShardRouting activeReplicaWithHighestVersion(ShardId shardId) { return assignedShards(shardId).stream() .filter(shr -> shr.primary() == false && shr.active()) .filter(shr -> node(shr.currentNodeId()) != null) + .filter(ShardRouting::isPromotableToPrimary) .max( Comparator.comparing( shr -> node(shr.currentNodeId()).node(), @@ -655,17 +654,36 @@ assert node(failedShard.currentNodeId()).getByShardId(failedShard.shardId()) == } private void unassignPrimaryAndPromoteActiveReplicaIfExists( - ShardRouting failedShard, + ShardRouting failedPrimary, UnassignedInfo unassignedInfo, RoutingChangesObserver routingChangesObserver ) { - assert failedShard.primary(); - ShardRouting activeReplica = activeReplicaWithHighestVersion(failedShard.shardId()); - if (activeReplica == null) { - moveToUnassigned(failedShard, unassignedInfo); + assert failedPrimary.primary(); + ShardRouting replicaToPromote = activePromotableReplicaWithHighestVersion(failedPrimary.shardId()); + if (replicaToPromote == null) { + moveToUnassigned(failedPrimary, unassignedInfo); + for (ShardRouting unpromotableReplica : List.copyOf(assignedShards(failedPrimary.shardId()))) { + assert unpromotableReplica.primary() == false : unpromotableReplica; + assert unpromotableReplica.isPromotableToPrimary() == false : unpromotableReplica; + moveToUnassigned( + unpromotableReplica, + new UnassignedInfo( + UnassignedInfo.Reason.UNPROMOTABLE_REPLICA, + unassignedInfo.getMessage(), + unassignedInfo.getFailure(), + 0, + unassignedInfo.getUnassignedTimeInNanos(), + unassignedInfo.getUnassignedTimeInMillis(), + false, // TODO debatable, but do we want to delay reassignment of unpromotable replicas tho? + AllocationStatus.NO_ATTEMPT, + Set.of(), + unpromotableReplica.currentNodeId() + ) + ); + } } else { - movePrimaryToUnassignedAndDemoteToReplica(failedShard, unassignedInfo); - promoteReplicaToPrimary(activeReplica, routingChangesObserver); + movePrimaryToUnassignedAndDemoteToReplica(failedPrimary, unassignedInfo); + promoteReplicaToPrimary(replicaToPromote, routingChangesObserver); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index 23d1528f72db..5596691fa3a3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -441,21 +441,40 @@ public static Builder builder(RoutingTable routingTable) { return new Builder(routingTable); } + public static Builder builder(ShardRoutingRoleStrategy shardRoutingRoleStrategy) { + return new Builder(shardRoutingRoleStrategy); + } + + public static Builder builder(ShardRoutingRoleStrategy shardRoutingRoleStrategy, RoutingTable routingTable) { + return new Builder(shardRoutingRoleStrategy, routingTable); + } + /** * Builder for the routing table. Note that build can only be called one time. 
*/ public static class Builder { + private final ShardRoutingRoleStrategy shardRoutingRoleStrategy; private long version; private ImmutableOpenMap.Builder indicesRouting; public Builder() { - indicesRouting = ImmutableOpenMap.builder(); + this(ShardRoutingRoleStrategy.NO_SHARD_CREATION); } public Builder(RoutingTable routingTable) { - version = routingTable.version; - indicesRouting = ImmutableOpenMap.builder(routingTable.indicesRouting); + this(ShardRoutingRoleStrategy.NO_SHARD_CREATION, routingTable); + } + + public Builder(ShardRoutingRoleStrategy shardRoutingRoleStrategy) { + this.shardRoutingRoleStrategy = shardRoutingRoleStrategy; + this.indicesRouting = ImmutableOpenMap.builder(); + } + + public Builder(ShardRoutingRoleStrategy shardRoutingRoleStrategy, RoutingTable routingTable) { + this.shardRoutingRoleStrategy = shardRoutingRoleStrategy; + this.version = routingTable.version; + this.indicesRouting = ImmutableOpenMap.builder(routingTable.indicesRouting); } private static void addShard( @@ -463,7 +482,7 @@ private static void addShard( final ShardRouting shardRoutingEntry ) { Index index = shardRoutingEntry.index(); - indexRoutingTableBuilders.computeIfAbsent(index.getName(), idxName -> new IndexRoutingTable.Builder(index)) + indexRoutingTableBuilders.computeIfAbsent(index.getName(), idxName -> IndexRoutingTable.builder(index)) .addShard(shardRoutingEntry); } @@ -485,7 +504,7 @@ public Builder updateNumberOfReplicas(final int numberOfReplicas, final String[] continue; } int currentNumberOfReplicas = indexRoutingTable.shard(0).size() - 1; // remove the required primary - IndexRoutingTable.Builder builder = new IndexRoutingTable.Builder(indexRoutingTable.getIndex()); + IndexRoutingTable.Builder builder = new IndexRoutingTable.Builder(shardRoutingRoleStrategy, indexRoutingTable.getIndex()); // re-add all the shards builder.ensureShardArray(indexRoutingTable.size()); for (int i = 0; i < indexRoutingTable.size(); i++) { @@ -494,7 +513,7 @@ public Builder updateNumberOfReplicas(final int numberOfReplicas, final String[] if (currentNumberOfReplicas < numberOfReplicas) { // now, add "empty" ones for (int i = 0; i < (numberOfReplicas - currentNumberOfReplicas); i++) { - builder.addReplica(); + builder.addReplica(shardRoutingRoleStrategy.newReplicaRole()); } } else if (currentNumberOfReplicas > numberOfReplicas) { for (int i = 0; i < (currentNumberOfReplicas - numberOfReplicas); i++) { @@ -508,9 +527,10 @@ public Builder updateNumberOfReplicas(final int numberOfReplicas, final String[] public Builder addAsNew(IndexMetadata indexMetadata) { if (indexMetadata.getState() == IndexMetadata.State.OPEN) { - IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetadata.getIndex()).initializeAsNew( - indexMetadata - ); + IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder( + shardRoutingRoleStrategy, + indexMetadata.getIndex() + ).initializeAsNew(indexMetadata); add(indexRoutingBuilder); } return this; @@ -518,8 +538,10 @@ public Builder addAsNew(IndexMetadata indexMetadata) { public Builder addAsRecovery(IndexMetadata indexMetadata) { if (indexMetadata.getState() == IndexMetadata.State.OPEN || isIndexVerifiedBeforeClosed(indexMetadata)) { - IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetadata.getIndex()) - .initializeAsRecovery(indexMetadata); + IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder( + shardRoutingRoleStrategy, + indexMetadata.getIndex() + 
).initializeAsRecovery(indexMetadata); add(indexRoutingBuilder); } return this; @@ -527,8 +549,10 @@ public Builder addAsRecovery(IndexMetadata indexMetadata) { public Builder addAsFromDangling(IndexMetadata indexMetadata) { if (indexMetadata.getState() == IndexMetadata.State.OPEN || isIndexVerifiedBeforeClosed(indexMetadata)) { - IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetadata.getIndex()) - .initializeAsFromDangling(indexMetadata); + IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder( + shardRoutingRoleStrategy, + indexMetadata.getIndex() + ).initializeAsFromDangling(indexMetadata); add(indexRoutingBuilder); } return this; @@ -536,8 +560,10 @@ public Builder addAsFromDangling(IndexMetadata indexMetadata) { public Builder addAsFromCloseToOpen(IndexMetadata indexMetadata) { if (indexMetadata.getState() == IndexMetadata.State.OPEN) { - IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetadata.getIndex()) - .initializeAsFromCloseToOpen(indexMetadata, indicesRouting.get(indexMetadata.getIndex().getName())); + IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder( + shardRoutingRoleStrategy, + indexMetadata.getIndex() + ).initializeAsFromCloseToOpen(indexMetadata, indicesRouting.get(indexMetadata.getIndex().getName())); add(indexRoutingBuilder); } return this; @@ -545,27 +571,27 @@ public Builder addAsFromCloseToOpen(IndexMetadata indexMetadata) { public Builder addAsFromOpenToClose(IndexMetadata indexMetadata) { assert isIndexVerifiedBeforeClosed(indexMetadata); - IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetadata.getIndex()) - .initializeAsFromOpenToClose(indexMetadata, indicesRouting.get(indexMetadata.getIndex().getName())); + IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder( + shardRoutingRoleStrategy, + indexMetadata.getIndex() + ).initializeAsFromOpenToClose(indexMetadata, indicesRouting.get(indexMetadata.getIndex().getName())); return add(indexRoutingBuilder); } public Builder addAsRestore(IndexMetadata indexMetadata, SnapshotRecoverySource recoverySource) { - IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetadata.getIndex()).initializeAsRestore( - indexMetadata, - recoverySource, - indicesRouting.get(indexMetadata.getIndex().getName()) - ); + IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder( + shardRoutingRoleStrategy, + indexMetadata.getIndex() + ).initializeAsRestore(indexMetadata, recoverySource, indicesRouting.get(indexMetadata.getIndex().getName())); add(indexRoutingBuilder); return this; } public Builder addAsNewRestore(IndexMetadata indexMetadata, SnapshotRecoverySource recoverySource, Set ignoreShards) { - IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetadata.getIndex()).initializeAsNewRestore( - indexMetadata, - recoverySource, - ignoreShards - ); + IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder( + shardRoutingRoleStrategy, + indexMetadata.getIndex() + ).initializeAsNewRestore(indexMetadata, recoverySource, ignoreShards); add(indexRoutingBuilder); return this; } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index ac6dd661f046..37ae9784d9cf 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -8,17 +8,19 @@ package org.elasticsearch.cluster.routing; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RecoverySource.ExistingStoreRecoverySource; import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -36,8 +38,8 @@ public final class ShardRouting implements Writeable, ToXContentObject { * Used if shard size is not available */ public static final long UNAVAILABLE_EXPECTED_SHARD_SIZE = -1; - private static final Version EXPECTED_SHARD_SIZE_FOR_STARTED_VERSION = Version.V_8_5_0; - private static final Version RELOCATION_FAILURE_INFO_VERSION = Version.V_8_6_0; + private static final TransportVersion EXPECTED_SHARD_SIZE_FOR_STARTED_VERSION = TransportVersion.V_8_5_0; + private static final TransportVersion RELOCATION_FAILURE_INFO_VERSION = TransportVersion.V_8_6_0; private final ShardId shardId; private final String currentNodeId; @@ -60,6 +62,7 @@ public final class ShardRouting implements Writeable, ToXContentObject { private final long expectedShardSize; @Nullable private final ShardRouting targetRelocatingShard; + private final Role role; /** * A constructor to internally create shard routing instances, note, the internal flag should only be set to true @@ -75,7 +78,8 @@ public final class ShardRouting implements Writeable, ToXContentObject { UnassignedInfo unassignedInfo, RelocationFailureInfo relocationFailureInfo, AllocationId allocationId, - long expectedShardSize + long expectedShardSize, + Role role ) { this.shardId = shardId; this.currentNodeId = currentNodeId; @@ -87,17 +91,45 @@ public final class ShardRouting implements Writeable, ToXContentObject { this.relocationFailureInfo = relocationFailureInfo; this.allocationId = allocationId; this.expectedShardSize = expectedShardSize; + this.role = role; this.targetRelocatingShard = initializeTargetRelocatingShard(); - assert (state == ShardRoutingState.UNASSIGNED && unassignedInfo == null) == false : "unassigned shard must be created with meta"; + + assert assertConsistent(); + } + + private boolean assertConsistent() { assert relocationFailureInfo != null : "relocation failure info must be always set"; - assert (state == ShardRoutingState.UNASSIGNED || state == ShardRoutingState.INITIALIZING) == (recoverySource != null) - : "recovery source only available on unassigned or initializing shard but was " + state; - assert recoverySource == null || recoverySource == PeerRecoverySource.INSTANCE || primary - : "replica shards always recover from primary"; - assert (currentNodeId == null) == (state == ShardRoutingState.UNASSIGNED) - : "unassigned shard must not be assigned to a node " + this; - assert relocatingNodeId == null || state == ShardRoutingState.RELOCATING || state == ShardRoutingState.INITIALIZING - : state + " shard must not have relocating 
node " + this; + assert role != null : "role must be always set"; + assert primary == false || role.isPromotableToPrimary() : "shard with unpromotable role was promoted to primary: " + this; + switch (state) { + case UNASSIGNED -> { + assert currentNodeId == null : state + " shard must not be assigned to a node " + this; + assert relocatingNodeId == null : state + " shard must not be relocating to a node " + this; + assert unassignedInfo != null : state + " shard must be created with unassigned info " + this; + assert recoverySource != null : state + " shard must be created with a recovery source" + this; + assert primary ^ recoverySource == PeerRecoverySource.INSTANCE : "replica shards always recover from primary" + this; + } + case INITIALIZING -> { + assert currentNodeId != null : state + " shard must be assigned to a node " + this; + // relocatingNodeId is not set for initializing shard but set for relocating shard counterpart + // unassignedInfo is kept after starting unassigned shard but not present for relocating shard counterpart + assert recoverySource != null : state + "shard must be created with a recovery source" + this; + assert primary || recoverySource == PeerRecoverySource.INSTANCE : "replica shards always recover from primary" + this; + } + case STARTED -> { + assert currentNodeId != null : state + " shard must be assigned to a node " + this; + assert relocatingNodeId == null : state + " shard must not be relocating to a node " + this; + assert unassignedInfo == null : state + " shard must be created without unassigned info " + this; + assert recoverySource == null : state + " shard must be created without a recovery source" + this; + } + case RELOCATING -> { + assert currentNodeId != null : state + " shard must be assigned to a node " + this; + assert relocatingNodeId != null : state + " shard must be relocating to a node " + this; + assert unassignedInfo == null : state + " shard must be created without unassigned info " + this; + assert recoverySource == null : state + " shard must be created without a recovery source" + this; + } + } + return true; } @Nullable @@ -113,7 +145,8 @@ private ShardRouting initializeTargetRelocatingShard() { unassignedInfo, RelocationFailureInfo.NO_FAILURES, AllocationId.newTargetRelocation(allocationId), - expectedShardSize + expectedShardSize, + role ); } else { return null; @@ -127,7 +160,8 @@ public static ShardRouting newUnassigned( ShardId shardId, boolean primary, RecoverySource recoverySource, - UnassignedInfo unassignedInfo + UnassignedInfo unassignedInfo, + Role role ) { return new ShardRouting( shardId, @@ -139,7 +173,8 @@ public static ShardRouting newUnassigned( unassignedInfo, RelocationFailureInfo.NO_FAILURES, null, - UNAVAILABLE_EXPECTED_SHARD_SIZE + UNAVAILABLE_EXPECTED_SHARD_SIZE, + role ); } @@ -304,7 +339,7 @@ public ShardRouting(ShardId shardId, StreamInput in) throws IOException { recoverySource = null; } unassignedInfo = in.readOptionalWriteable(UnassignedInfo::new); - if (in.getVersion().onOrAfter(RELOCATION_FAILURE_INFO_VERSION)) { + if (in.getTransportVersion().onOrAfter(RELOCATION_FAILURE_INFO_VERSION)) { relocationFailureInfo = RelocationFailureInfo.readFrom(in); } else { relocationFailureInfo = RelocationFailureInfo.NO_FAILURES; @@ -312,11 +347,16 @@ public ShardRouting(ShardId shardId, StreamInput in) throws IOException { allocationId = in.readOptionalWriteable(AllocationId::new); if (state == ShardRoutingState.RELOCATING || state == ShardRoutingState.INITIALIZING - || (state == ShardRoutingState.STARTED && 
in.getVersion().onOrAfter(EXPECTED_SHARD_SIZE_FOR_STARTED_VERSION))) { + || (state == ShardRoutingState.STARTED && in.getTransportVersion().onOrAfter(EXPECTED_SHARD_SIZE_FOR_STARTED_VERSION))) { expectedShardSize = in.readLong(); } else { expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE; } + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + role = Role.readFrom(in); + } else { + role = Role.DEFAULT; + } targetRelocatingShard = initializeTargetRelocatingShard(); } @@ -339,15 +379,23 @@ public void writeToThin(StreamOutput out) throws IOException { recoverySource.writeTo(out); } out.writeOptionalWriteable(unassignedInfo); - if (out.getVersion().onOrAfter(RELOCATION_FAILURE_INFO_VERSION)) { + if (out.getTransportVersion().onOrAfter(RELOCATION_FAILURE_INFO_VERSION)) { relocationFailureInfo.writeTo(out); } out.writeOptionalWriteable(allocationId); if (state == ShardRoutingState.RELOCATING || state == ShardRoutingState.INITIALIZING - || (state == ShardRoutingState.STARTED && out.getVersion().onOrAfter(EXPECTED_SHARD_SIZE_FOR_STARTED_VERSION))) { + || (state == ShardRoutingState.STARTED && out.getTransportVersion().onOrAfter(EXPECTED_SHARD_SIZE_FOR_STARTED_VERSION))) { out.writeLong(expectedShardSize); } + + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + role.writeTo(out); + } else if (role != Role.DEFAULT) { + throw new IllegalStateException( + Strings.format("cannot send role [%s] with transport version [%s]", role, out.getTransportVersion()) + ); + } } @Override @@ -369,7 +417,8 @@ public ShardRouting updateUnassigned(UnassignedInfo unassignedInfo, RecoverySour unassignedInfo, relocationFailureInfo, allocationId, - expectedShardSize + expectedShardSize, + role ); } @@ -385,7 +434,8 @@ public ShardRouting updateRelocationFailure(RelocationFailureInfo relocationFail unassignedInfo, relocationFailureInfo, allocationId, - expectedShardSize + expectedShardSize, + role ); } @@ -414,7 +464,8 @@ public ShardRouting moveToUnassigned(UnassignedInfo unassignedInfo) { unassignedInfo, RelocationFailureInfo.NO_FAILURES, null, - UNAVAILABLE_EXPECTED_SHARD_SIZE + UNAVAILABLE_EXPECTED_SHARD_SIZE, + role ); } @@ -442,7 +493,8 @@ public ShardRouting initialize(String nodeId, @Nullable String existingAllocatio unassignedInfo, RelocationFailureInfo.NO_FAILURES, allocationId, - expectedShardSize + expectedShardSize, + role ); } @@ -463,7 +515,8 @@ public ShardRouting relocate(String relocatingNodeId, long expectedShardSize) { null, relocationFailureInfo, AllocationId.newRelocation(allocationId), - expectedShardSize + expectedShardSize, + role ); } @@ -485,7 +538,8 @@ public ShardRouting cancelRelocation() { null, relocationFailureInfo.incFailedRelocations(), AllocationId.cancelRelocation(allocationId), - UNAVAILABLE_EXPECTED_SHARD_SIZE + UNAVAILABLE_EXPECTED_SHARD_SIZE, + role ); } @@ -509,7 +563,8 @@ public ShardRouting removeRelocationSource() { unassignedInfo, relocationFailureInfo, AllocationId.finishRelocation(allocationId), - expectedShardSize + expectedShardSize, + role ); } @@ -530,7 +585,8 @@ public ShardRouting reinitializeReplicaShard() { unassignedInfo, relocationFailureInfo, AllocationId.newInitializing(), - expectedShardSize + expectedShardSize, + role ); } @@ -557,7 +613,8 @@ public ShardRouting moveToStarted(long expectedShardSize) { null, RelocationFailureInfo.NO_FAILURES, allocationId, - expectedShardSize + expectedShardSize, + role ); } @@ -581,7 +638,8 @@ public ShardRouting moveActiveReplicaToPrimary() { unassignedInfo, relocationFailureInfo, 
allocationId, - expectedShardSize + expectedShardSize, + role ); } @@ -605,7 +663,8 @@ public ShardRouting moveUnassignedFromPrimary() { unassignedInfo, relocationFailureInfo, allocationId, - expectedShardSize + expectedShardSize, + role ); } @@ -734,7 +793,8 @@ public boolean equalsIgnoringMetadata(ShardRouting other) { && Objects.equals(relocatingNodeId, other.relocatingNodeId) && Objects.equals(allocationId, other.allocationId) && state == other.state - && Objects.equals(recoverySource, other.recoverySource); + && Objects.equals(recoverySource, other.recoverySource) + && role == other.role; } @Override @@ -770,6 +830,7 @@ public int hashCode() { h = 31 * h + (allocationId != null ? allocationId.hashCode() : 0); h = 31 * h + (unassignedInfo != null ? unassignedInfo.hashCode() : 0); h = 31 * h + (relocationFailureInfo != null ? relocationFailureInfo.hashCode() : 0); + h = 31 * h + role.hashCode(); hashCode = h; } return h; @@ -790,6 +851,9 @@ public String shortSummary() { if (relocatingNodeId != null) { sb.append("relocating [").append(relocatingNodeId).append("], "); } + if (role != Role.DEFAULT) { + sb.append("[").append(role).append("], "); + } if (primary) { sb.append("[P]"); } else { @@ -835,6 +899,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws unassignedInfo.toXContent(builder, params); } relocationFailureInfo.toXContent(builder, params); + role.toXContent(builder, params); return builder.endObject(); } @@ -855,4 +920,66 @@ public long getExpectedShardSize() { public RecoverySource recoverySource() { return recoverySource; } + + public Role role() { + return role; + } + + public boolean isPromotableToPrimary() { + return role.isPromotableToPrimary(); + } + + public boolean isSearchable() { + return role.isSearchable(); + } + + public enum Role implements Writeable, ToXContentFragment { + DEFAULT((byte) 0, true, true), + INDEX_ONLY((byte) 1, true, false), + SEARCH_ONLY((byte) 2, false, true); + + private final byte code; + private final boolean promotable; + private final boolean searchable; + + Role(byte code, boolean promotable, boolean searchable) { + this.code = code; + this.promotable = promotable; + this.searchable = searchable; + } + + /** + * @return whether a shard copy with this role may be promoted from replica to primary. If {@code index.number_of_replicas} is + * reduced, unpromotable replicas are removed first. + */ + public boolean isPromotableToPrimary() { + return promotable; + } + + /** + * @return whether a shard copy with this role may be the target of a search. + */ + public boolean isSearchable() { + return searchable; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return this == DEFAULT ? 
builder : builder.field("role", toString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeByte(code); + } + + public static Role readFrom(StreamInput in) throws IOException { + return switch (in.readByte()) { + case 0 -> DEFAULT; + case 1 -> INDEX_ONLY; + case 2 -> SEARCH_ONLY; + default -> throw new IllegalStateException("unknown role"); + }; + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingRoleStrategy.java b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingRoleStrategy.java new file mode 100644 index 000000000000..e136333e7e39 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingRoleStrategy.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.routing; + +public interface ShardRoutingRoleStrategy { + + /** + * @return the role for a copy of a new empty shard, where {@code copyIndex} is the index of the copy ({@code 0} for the primary and + * {@code 1..N} for replicas). + */ + ShardRouting.Role newEmptyRole(int copyIndex); + + /** + * @return the role for a new replica copy of an existing shard. + */ + ShardRouting.Role newReplicaRole(); + + /** + * @return the role for a copy of a new shard being restored from snapshot, where {@code copyIndex} is the index of the copy ({@code 0} + * for the primary and {@code 1..N} for replicas). + */ + default ShardRouting.Role newRestoredRole(int copyIndex) { + return newEmptyRole(copyIndex); + } + + /** + * A strategy that refuses to create any new shard copies, which is used (for instance) when reading shard copies from a remote node. + */ + ShardRoutingRoleStrategy NO_SHARD_CREATION = new ShardRoutingRoleStrategy() { + @Override + public ShardRouting.Role newEmptyRole(int copyIndex) { + return newReplicaRole(); + } + + @Override + public ShardRouting.Role newReplicaRole() { + assert false : "no shard creation permitted"; + throw new IllegalStateException("no shard creation permitted"); + } + }; +} diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java index 95e442b14055..f20ef4134243 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java @@ -9,7 +9,7 @@ package org.elasticsearch.cluster.routing; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; @@ -47,7 +47,8 @@ public final class UnassignedInfo implements ToXContentFragment, Writeable { * The version that the {@code lastAllocatedNode} field was added in. Used to adapt streaming of this class as appropriate for the * version of the node sending/receiving it. Should be removed once wire compatibility with this version is no longer necessary. 
*/ - private static final Version VERSION_LAST_ALLOCATED_NODE_ADDED = Version.V_7_15_0; + private static final TransportVersion VERSION_LAST_ALLOCATED_NODE_ADDED = TransportVersion.V_7_15_0; + private static final TransportVersion VERSION_UNPROMOTABLE_REPLICA_ADDED = TransportVersion.V_8_7_0; public static final DateFormatter DATE_TIME_FORMATTER = DateFormatter.forPattern("date_optional_time").withZone(ZoneOffset.UTC); @@ -133,7 +134,11 @@ public enum Reason { * Similar to NODE_LEFT, but at the time the node left, it had been registered for a restart via the Node Shutdown API. Note that * there is no verification that it was ready to be restarted, so this may be an intentional restart or a node crash. */ - NODE_RESTARTING + NODE_RESTARTING, + /** + * Replica is unpromotable and the primary failed. + */ + UNPROMOTABLE_REPLICA } /** @@ -297,7 +302,7 @@ public UnassignedInfo(StreamInput in) throws IOException { this.failedAllocations = in.readVInt(); this.lastAllocationStatus = AllocationStatus.readFrom(in); this.failedNodeIds = Collections.unmodifiableSet(in.readSet(StreamInput::readString)); - if (in.getVersion().onOrAfter(VERSION_LAST_ALLOCATED_NODE_ADDED)) { + if (in.getTransportVersion().onOrAfter(VERSION_LAST_ALLOCATED_NODE_ADDED)) { this.lastAllocatedNodeId = in.readOptionalString(); } else { this.lastAllocatedNodeId = null; @@ -305,8 +310,10 @@ public UnassignedInfo(StreamInput in) throws IOException { } public void writeTo(StreamOutput out) throws IOException { - if (reason.equals(Reason.NODE_RESTARTING) && out.getVersion().before(VERSION_LAST_ALLOCATED_NODE_ADDED)) { + if (reason.equals(Reason.NODE_RESTARTING) && out.getTransportVersion().before(VERSION_LAST_ALLOCATED_NODE_ADDED)) { out.writeByte((byte) Reason.NODE_LEFT.ordinal()); + } else if (reason.equals(Reason.UNPROMOTABLE_REPLICA) && out.getTransportVersion().before(VERSION_UNPROMOTABLE_REPLICA_ADDED)) { + out.writeByte((byte) Reason.PRIMARY_FAILED.ordinal()); } else { out.writeByte((byte) reason.ordinal()); } @@ -318,7 +325,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(failedAllocations); lastAllocationStatus.writeTo(out); out.writeCollection(failedNodeIds, StreamOutput::writeString); - if (out.getVersion().onOrAfter(VERSION_LAST_ALLOCATED_NODE_ADDED)) { + if (out.getTransportVersion().onOrAfter(VERSION_LAST_ALLOCATED_NODE_ADDED)) { out.writeOptionalString(lastAllocatedNodeId); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 6038bb25ca16..b9431045172e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingRoleStrategy; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; @@ -76,6 +77,7 @@ public class AllocationService { private final ShardsAllocator shardsAllocator; private final ClusterInfoService clusterInfoService; private final SnapshotsInfoService snapshotsInfoService; + private final 
ShardRoutingRoleStrategy shardRoutingRoleStrategy; // only for tests that use the GatewayAllocator as the unique ExistingShardsAllocator public AllocationService( @@ -83,9 +85,10 @@ public AllocationService( GatewayAllocator gatewayAllocator, ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService, - SnapshotsInfoService snapshotsInfoService + SnapshotsInfoService snapshotsInfoService, + ShardRoutingRoleStrategy shardRoutingRoleStrategy ) { - this(allocationDeciders, shardsAllocator, clusterInfoService, snapshotsInfoService); + this(allocationDeciders, shardsAllocator, clusterInfoService, snapshotsInfoService, shardRoutingRoleStrategy); setExistingShardsAllocators(Collections.singletonMap(GatewayAllocator.ALLOCATOR_NAME, gatewayAllocator)); } @@ -93,12 +96,14 @@ public AllocationService( AllocationDeciders allocationDeciders, ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService, - SnapshotsInfoService snapshotsInfoService + SnapshotsInfoService snapshotsInfoService, + ShardRoutingRoleStrategy shardRoutingRoleStrategy ) { this.allocationDeciders = allocationDeciders; this.shardsAllocator = shardsAllocator; this.clusterInfoService = clusterInfoService; this.snapshotsInfoService = snapshotsInfoService; + this.shardRoutingRoleStrategy = shardRoutingRoleStrategy; } /** @@ -117,6 +122,10 @@ public AllocationDeciders getAllocationDeciders() { return allocationDeciders; } + public ShardRoutingRoleStrategy getShardRoutingRoleStrategy() { + return shardRoutingRoleStrategy; + } + /** * Applies the started shards. Note, only initializing ShardRouting instances that exist in the routing table should be * provided as parameter and no duplicates should be contained. @@ -297,7 +306,7 @@ public ClusterState adaptAutoExpandReplicas(ClusterState clusterState) { if (autoExpandReplicaChanges.isEmpty()) { return clusterState; } else { - final RoutingTable.Builder routingTableBuilder = RoutingTable.builder(clusterState.routingTable()); + final RoutingTable.Builder routingTableBuilder = RoutingTable.builder(shardRoutingRoleStrategy, clusterState.routingTable()); final Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()); for (Map.Entry> entry : autoExpandReplicaChanges.entrySet()) { final int numberOfReplicas = entry.getKey(); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java index f5a4e1ff3a1e..86a8f20115c5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java @@ -11,7 +11,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.GroupedActionListener; +import org.elasticsearch.action.support.RefCountingRunnable; +import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterState; @@ -31,6 +32,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.Releasable; import java.util.ArrayList; import java.util.Collections; @@ -301,123 +303,109 @@ public void onNewInfo(ClusterInfo 
info) { } } - final ActionListener listener = new GroupedActionListener<>(ActionListener.wrap(this::checkFinished), 3); - - if (reroute) { - logger.debug("rerouting shards: [{}]", explanation); - rerouteService.reroute("disk threshold monitor", Priority.HIGH, ActionListener.wrap(reroutedClusterState -> { - - for (DiskUsage diskUsage : usagesOverHighThreshold) { - final RoutingNode routingNode = reroutedClusterState.getRoutingNodes().node(diskUsage.getNodeId()); - final DiskUsage usageIncludingRelocations; - final long relocatingShardsSize; - if (routingNode != null) { // might be temporarily null if the ClusterInfoService and the ClusterService are out of step - relocatingShardsSize = sizeOfRelocatingShards(routingNode, diskUsage, info, reroutedClusterState); - usageIncludingRelocations = new DiskUsage( - diskUsage.getNodeId(), - diskUsage.getNodeName(), - diskUsage.getPath(), - diskUsage.getTotalBytes(), - diskUsage.getFreeBytes() - relocatingShardsSize - ); - } else { - usageIncludingRelocations = diskUsage; - relocatingShardsSize = 0L; - } - final ByteSizeValue total = ByteSizeValue.ofBytes(usageIncludingRelocations.getTotalBytes()); - - if (usageIncludingRelocations.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdHighStage(total).getBytes()) { - nodesOverHighThresholdAndRelocating.remove(diskUsage.getNodeId()); - logger.warn( - "high disk watermark [{}] exceeded on {}, shards will be relocated away from this node; " - + "currently relocating away shards totalling [{}] bytes; the node is expected to continue to exceed " - + "the high disk watermark when these relocations are complete", - diskThresholdSettings.describeHighThreshold(total, false), - diskUsage, - -relocatingShardsSize - ); - } else if (nodesOverHighThresholdAndRelocating.add(diskUsage.getNodeId())) { - logger.info( - "high disk watermark [{}] exceeded on {}, shards will be relocated away from this node; " - + "currently relocating away shards totalling [{}] bytes; the node is expected to be below the high " - + "disk watermark when these relocations are complete", - diskThresholdSettings.describeHighThreshold(total, false), - diskUsage, - -relocatingShardsSize - ); - } else { - logger.debug( - "high disk watermark [{}] exceeded on {}, shards will be relocated away from this node; " - + "currently relocating away shards totalling [{}] bytes", - diskThresholdSettings.describeHighThreshold(total, false), - diskUsage, - -relocatingShardsSize - ); - } - } - - setLastRunTimeMillis(); - listener.onResponse(null); - }, e -> { - logger.debug("reroute failed", e); - setLastRunTimeMillis(); - listener.onFailure(e); - })); - } else { - logger.trace("no reroute required"); - listener.onResponse(null); - } - - // Generate a map of node name to ID so we can use it to look up node replacement targets - final Map nodeNameToId = state.getRoutingNodes() - .stream() - .collect(Collectors.toMap(rn -> rn.node().getName(), RoutingNode::nodeId, (s1, s2) -> s2)); + try (var asyncRefs = new RefCountingRunnable(this::checkFinished)) { + + if (reroute) { + logger.debug("rerouting shards: [{}]", explanation); + rerouteService.reroute( + "disk threshold monitor", + Priority.HIGH, + ActionListener.releaseAfter(ActionListener.runAfter(ActionListener.wrap(reroutedClusterState -> { + + for (DiskUsage diskUsage : usagesOverHighThreshold) { + final RoutingNode routingNode = reroutedClusterState.getRoutingNodes().node(diskUsage.getNodeId()); + final DiskUsage usageIncludingRelocations; + final long relocatingShardsSize; + if (routingNode != null) 
{ // might be temporarily null if ClusterInfoService and ClusterService are out of step + relocatingShardsSize = sizeOfRelocatingShards(routingNode, diskUsage, info, reroutedClusterState); + usageIncludingRelocations = new DiskUsage( + diskUsage.getNodeId(), + diskUsage.getNodeName(), + diskUsage.getPath(), + diskUsage.getTotalBytes(), + diskUsage.getFreeBytes() - relocatingShardsSize + ); + } else { + usageIncludingRelocations = diskUsage; + relocatingShardsSize = 0L; + } + final ByteSizeValue total = ByteSizeValue.ofBytes(usageIncludingRelocations.getTotalBytes()); + + if (usageIncludingRelocations.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdHighStage(total) + .getBytes()) { + nodesOverHighThresholdAndRelocating.remove(diskUsage.getNodeId()); + logger.warn(""" + high disk watermark [{}] exceeded on {}, shards will be relocated away from this node; currently \ + relocating away shards totalling [{}] bytes; the node is expected to continue to exceed the high disk \ + watermark when these relocations are complete\ + """, diskThresholdSettings.describeHighThreshold(total, false), diskUsage, -relocatingShardsSize); + } else if (nodesOverHighThresholdAndRelocating.add(diskUsage.getNodeId())) { + logger.info(""" + high disk watermark [{}] exceeded on {}, shards will be relocated away from this node; currently \ + relocating away shards totalling [{}] bytes; the node is expected to be below the high disk watermark \ + when these relocations are complete\ + """, diskThresholdSettings.describeHighThreshold(total, false), diskUsage, -relocatingShardsSize); + } else { + logger.debug(""" + high disk watermark [{}] exceeded on {}, shards will be relocated away from this node; currently \ + relocating away shards totalling [{}] bytes\ + """, diskThresholdSettings.describeHighThreshold(total, false), diskUsage, -relocatingShardsSize); + } + } + }, e -> logger.debug("reroute failed", e)), this::setLastRunTimeMillis), asyncRefs.acquire()) + ); + } else { + logger.trace("no reroute required"); + } - // Calculate both the source node id and the target node id of a "replace" type shutdown - final Set nodesIdsPartOfReplacement = state.metadata() - .nodeShutdowns() - .values() - .stream() - .filter(meta -> meta.getType() == SingleNodeShutdownMetadata.Type.REPLACE) - .flatMap(meta -> Stream.of(meta.getNodeId(), nodeNameToId.get(meta.getTargetNodeName()))) - .collect(Collectors.toSet()); - - // Generate a set of all the indices that exist on either the target or source of a node replacement - final Set indicesOnReplaceSourceOrTarget = new HashSet<>(); - for (String nodeId : nodesIdsPartOfReplacement) { - for (ShardRouting shardRouting : state.getRoutingNodes().node(nodeId)) { - indicesOnReplaceSourceOrTarget.add(shardRouting.index().getName()); + // Generate a map of node name to ID so we can use it to look up node replacement targets + final Map nodeNameToId = state.getRoutingNodes() + .stream() + .collect(Collectors.toMap(rn -> rn.node().getName(), RoutingNode::nodeId, (s1, s2) -> s2)); + + // Calculate both the source node id and the target node id of a "replace" type shutdown + final Set nodesIdsPartOfReplacement = state.metadata() + .nodeShutdowns() + .values() + .stream() + .filter(meta -> meta.getType() == SingleNodeShutdownMetadata.Type.REPLACE) + .flatMap(meta -> Stream.of(meta.getNodeId(), nodeNameToId.get(meta.getTargetNodeName()))) + .collect(Collectors.toSet()); + + // Generate a set of all the indices that exist on either the target or source of a node replacement + final Set 
indicesOnReplaceSourceOrTarget = new HashSet<>(); + for (String nodeId : nodesIdsPartOfReplacement) { + for (ShardRouting shardRouting : state.getRoutingNodes().node(nodeId)) { + indicesOnReplaceSourceOrTarget.add(shardRouting.index().getName()); + } } - } - final Set indicesToAutoRelease = state.routingTable() - .indicesRouting() - .keySet() - .stream() - .filter(index -> indicesNotToAutoRelease.contains(index) == false) - .filter(index -> state.getBlocks().hasIndexBlock(index, IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK)) - // Do not auto release indices that are on either the source or the target of a node replacement - .filter(index -> indicesOnReplaceSourceOrTarget.contains(index) == false) - .collect(Collectors.toSet()); - - if (indicesToAutoRelease.isEmpty() == false) { - logger.info( - "releasing read-only block on indices " - + indicesToAutoRelease - + " since they are now allocated to nodes with sufficient disk space" - ); - updateIndicesReadOnly(indicesToAutoRelease, listener, false); - } else { - logger.trace("no auto-release required"); - listener.onResponse(null); - } + final Set indicesToAutoRelease = state.routingTable() + .indicesRouting() + .keySet() + .stream() + .filter(index -> indicesNotToAutoRelease.contains(index) == false) + .filter(index -> state.getBlocks().hasIndexBlock(index, IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK)) + // Do not auto release indices that are on either the source or the target of a node replacement + .filter(index -> indicesOnReplaceSourceOrTarget.contains(index) == false) + .collect(Collectors.toSet()); + + if (indicesToAutoRelease.isEmpty() == false) { + logger.info( + "releasing read-only block on indices " + + indicesToAutoRelease + + " since they are now allocated to nodes with sufficient disk space" + ); + updateIndicesReadOnly(indicesToAutoRelease, asyncRefs.acquire(), false); + } else { + logger.trace("no auto-release required"); + } - indicesToMarkReadOnly.removeIf(index -> state.getBlocks().indexBlocked(ClusterBlockLevel.WRITE, index)); - logger.trace("marking indices as read-only: [{}]", indicesToMarkReadOnly); - if (indicesToMarkReadOnly.isEmpty() == false) { - updateIndicesReadOnly(indicesToMarkReadOnly, listener, true); - } else { - listener.onResponse(null); + indicesToMarkReadOnly.removeIf(index -> state.getBlocks().indexBlocked(ClusterBlockLevel.WRITE, index)); + logger.trace("marking indices as read-only: [{}]", indicesToMarkReadOnly); + if (indicesToMarkReadOnly.isEmpty() == false) { + updateIndicesReadOnly(indicesToMarkReadOnly, asyncRefs.acquire(), true); + } } } @@ -453,23 +441,24 @@ private void setLastRunTimeMillis() { lastRunTimeMillis.getAndUpdate(l -> Math.max(l, currentTimeMillisSupplier.getAsLong())); } - protected void updateIndicesReadOnly(Set indicesToUpdate, ActionListener listener, boolean readOnly) { + protected void updateIndicesReadOnly(Set indicesToUpdate, Releasable onCompletion, boolean readOnly) { // set read-only block but don't block on the response - ActionListener wrappedListener = ActionListener.wrap(r -> { - setLastRunTimeMillis(); - listener.onResponse(r); - }, e -> { - logger.debug(() -> "setting indices [" + readOnly + "] read-only failed", e); - setLastRunTimeMillis(); - listener.onFailure(e); - }); Settings readOnlySettings = readOnly ? 
READ_ONLY_ALLOW_DELETE_SETTINGS : NOT_READ_ONLY_ALLOW_DELETE_SETTINGS; client.admin() .indices() .prepareUpdateSettings(indicesToUpdate.toArray(Strings.EMPTY_ARRAY)) .setSettings(readOnlySettings) .origin("disk-threshold-monitor") - .execute(wrappedListener.map(r -> null)); + .execute( + ActionListener.releaseAfter( + ActionListener.runAfter( + ActionListener.noop() + .delegateResponse((l, e) -> logger.debug(() -> "setting indices [" + readOnly + "] read-only failed", e)), + this::setLastRunTimeMillis + ), + onCompletion + ) + ); } private void removeExistingIndexBlocks() { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetadataUpdater.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetadataUpdater.java index 469e7f7efe36..e0b53e312e40 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetadataUpdater.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/IndexMetadataUpdater.java @@ -69,12 +69,14 @@ public void shardStarted(ShardRouting initializingShard, ShardRouting startedSha + "] and startedShard.allocationId [" + startedShard.allocationId().getId() + "] have to have the same"; - Updates updates = changes(startedShard.shardId()); - updates.addedAllocationIds.add(startedShard.allocationId().getId()); - if (startedShard.primary() - // started shard has to have null recoverySource; have to pick up recoverySource from its initializing state - && (initializingShard.recoverySource() == RecoverySource.ExistingStoreRecoverySource.FORCE_STALE_PRIMARY_INSTANCE)) { - updates.removedAllocationIds.add(RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID); + if (startedShard.isPromotableToPrimary()) { + Updates updates = changes(startedShard.shardId()); + updates.addedAllocationIds.add(startedShard.allocationId().getId()); + if (startedShard.primary() + // started shard has to have null recoverySource; have to pick up recoverySource from its initializing state + && (initializingShard.recoverySource() == RecoverySource.ExistingStoreRecoverySource.FORCE_STALE_PRIMARY_INSTANCE)) { + updates.removedAllocationIds.add(RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID); + } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java index ecc6fb295fea..7846cbacb90d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java @@ -407,7 +407,17 @@ public void setSimulatedClusterInfo(ClusterInfo clusterInfo) { } public RoutingAllocation immutableClone() { - return new RoutingAllocation(deciders, clusterState, clusterInfo, shardSizeInfo, currentNanoTime); + return new RoutingAllocation( + deciders, + routingNodesChanged() + ? 
ClusterState.builder(clusterState) + .routingTable(RoutingTable.of(clusterState.routingTable().version(), routingNodes)) + .build() + : clusterState, + clusterInfo, + shardSizeInfo, + currentNanoTime + ); } public RoutingAllocation mutableCloneForSimulation() { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java index 2efe60c65146..51e53d086739 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java @@ -43,6 +43,7 @@ import org.elasticsearch.health.ImpactArea; import org.elasticsearch.health.SimpleHealthIndicatorDetails; import org.elasticsearch.health.node.HealthInfo; +import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; import java.util.ArrayList; @@ -59,6 +60,8 @@ import java.util.stream.Stream; import static java.util.stream.Collectors.joining; +import static java.util.stream.Collectors.toMap; +import static java.util.stream.Collectors.toSet; import static org.elasticsearch.cluster.health.ClusterShardHealth.getInactivePrimaryHealth; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_PREFIX; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING; @@ -67,6 +70,7 @@ import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING; +import static org.elasticsearch.health.Diagnosis.Resource.Type.FEATURE_STATE; import static org.elasticsearch.health.Diagnosis.Resource.Type.INDEX; import static org.elasticsearch.health.HealthStatus.GREEN; import static org.elasticsearch.health.HealthStatus.RED; @@ -96,9 +100,16 @@ public class ShardsAvailabilityHealthIndicatorService implements HealthIndicator private final ClusterService clusterService; private final AllocationService allocationService; - public ShardsAvailabilityHealthIndicatorService(ClusterService clusterService, AllocationService allocationService) { + private final SystemIndices systemIndices; + + public ShardsAvailabilityHealthIndicatorService( + ClusterService clusterService, + AllocationService allocationService, + SystemIndices systemIndices + ) { this.clusterService = clusterService; this.allocationService = allocationService; + this.systemIndices = systemIndices; } @Override @@ -107,7 +118,7 @@ public String name() { } @Override - public HealthIndicatorResult calculate(boolean verbose, HealthInfo healthInfo) { + public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo) { var state = clusterService.state(); var shutdown = state.getMetadata().custom(NodesShutdownMetadata.TYPE, NodesShutdownMetadata.EMPTY); var status = new ShardAllocationStatus(state.getMetadata()); @@ -126,7 +137,7 @@ public HealthIndicatorResult calculate(boolean verbose, HealthInfo healthInfo) { status.getSymptom(), status.getDetails(verbose), status.getImpacts(), - 
status.getDiagnosis(verbose) + status.getDiagnosis(verbose, maxAffectedResourcesCount) ); } @@ -134,7 +145,7 @@ public HealthIndicatorResult calculate(boolean verbose, HealthInfo healthInfo) { public static final String PRIMARY_UNASSIGNED_IMPACT_ID = "primary_unassigned"; public static final String REPLICA_UNASSIGNED_IMPACT_ID = "replica_unassigned"; - public static final String RESTORE_FROM_SNAPSHOT_ACTION_GUIDE = "http://ela.st/restore-snapshot"; + public static final String RESTORE_FROM_SNAPSHOT_ACTION_GUIDE = "https://ela.st/restore-snapshot"; public static final Diagnosis.Definition ACTION_RESTORE_FROM_SNAPSHOT = new Diagnosis.Definition( NAME, "restore_from_snapshot", @@ -144,7 +155,7 @@ public HealthIndicatorResult calculate(boolean verbose, HealthInfo healthInfo) { RESTORE_FROM_SNAPSHOT_ACTION_GUIDE ); - public static final String DIAGNOSE_SHARDS_ACTION_GUIDE = "http://ela.st/diagnose-shards"; + public static final String DIAGNOSE_SHARDS_ACTION_GUIDE = "https://ela.st/diagnose-shards"; public static final Diagnosis.Definition ACTION_CHECK_ALLOCATION_EXPLAIN_API = new Diagnosis.Definition( NAME, "explain_allocations", @@ -155,7 +166,7 @@ public HealthIndicatorResult calculate(boolean verbose, HealthInfo healthInfo) { DIAGNOSE_SHARDS_ACTION_GUIDE ); - public static final String FIX_DELAYED_SHARDS_GUIDE = "http://ela.st/fix-delayed-shard-allocation"; + public static final String FIX_DELAYED_SHARDS_GUIDE = "https://ela.st/fix-delayed-shard-allocation"; public static final Diagnosis.Definition DIAGNOSIS_WAIT_FOR_OR_FIX_DELAYED_SHARDS = new Diagnosis.Definition( NAME, "delayed_shard_allocations", @@ -166,7 +177,7 @@ public HealthIndicatorResult calculate(boolean verbose, HealthInfo healthInfo) { FIX_DELAYED_SHARDS_GUIDE ); - public static final String ENABLE_INDEX_ALLOCATION_GUIDE = "http://ela.st/fix-index-allocation"; + public static final String ENABLE_INDEX_ALLOCATION_GUIDE = "https://ela.st/fix-index-allocation"; public static final Diagnosis.Definition ACTION_ENABLE_INDEX_ROUTING_ALLOCATION = new Diagnosis.Definition( NAME, "enable_index_allocations", @@ -179,7 +190,7 @@ public HealthIndicatorResult calculate(boolean verbose, HealthInfo healthInfo) { + "].", ENABLE_INDEX_ALLOCATION_GUIDE ); - public static final String ENABLE_CLUSTER_ALLOCATION_ACTION_GUIDE = "http://ela.st/fix-cluster-allocation"; + public static final String ENABLE_CLUSTER_ALLOCATION_ACTION_GUIDE = "https://ela.st/fix-cluster-allocation"; public static final Diagnosis.Definition ACTION_ENABLE_CLUSTER_ROUTING_ALLOCATION = new Diagnosis.Definition( NAME, "enable_cluster_allocations", @@ -193,7 +204,7 @@ public HealthIndicatorResult calculate(boolean verbose, HealthInfo healthInfo) { ENABLE_CLUSTER_ALLOCATION_ACTION_GUIDE ); - public static final String ENABLE_TIER_ACTION_GUIDE = "http://ela.st/enable-tier"; + public static final String ENABLE_TIER_ACTION_GUIDE = "https://ela.st/enable-tier"; public static final Map ACTION_ENABLE_TIERS_LOOKUP = DataTier.ALL_DATA_TIERS.stream() .collect( Collectors.toUnmodifiableMap( @@ -209,7 +220,7 @@ public HealthIndicatorResult calculate(boolean verbose, HealthInfo healthInfo) { ) ); - public static final String INCREASE_SHARD_LIMIT_ACTION_GUIDE = "http://ela.st/index-total-shards"; + public static final String INCREASE_SHARD_LIMIT_ACTION_GUIDE = "https://ela.st/index-total-shards"; public static final Diagnosis.Definition ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING = new Diagnosis.Definition( NAME, "increase_shard_limit_index_setting", @@ -240,7 +251,7 @@ public 
HealthIndicatorResult calculate(boolean verbose, HealthInfo healthInfo) { ) ); - public static final String INCREASE_CLUSTER_SHARD_LIMIT_ACTION_GUIDE = "http://ela.st/cluster-total-shards"; + public static final String INCREASE_CLUSTER_SHARD_LIMIT_ACTION_GUIDE = "https://ela.st/cluster-total-shards"; public static final Diagnosis.Definition ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING = new Diagnosis.Definition( NAME, "increase_shard_limit_cluster_setting", @@ -271,7 +282,7 @@ public HealthIndicatorResult calculate(boolean verbose, HealthInfo healthInfo) { ) ); - public static final String MIGRATE_TO_TIERS_ACTION_GUIDE = "http://ela.st/migrate-to-tiers"; + public static final String MIGRATE_TO_TIERS_ACTION_GUIDE = "https://ela.st/migrate-to-tiers"; public static final Diagnosis.Definition ACTION_MIGRATE_TIERS_AWAY_FROM_REQUIRE_DATA = new Diagnosis.Definition( NAME, "migrate_data_tiers_require_data", @@ -340,7 +351,7 @@ public HealthIndicatorResult calculate(boolean verbose, HealthInfo healthInfo) { ) ); - public static final String TIER_CAPACITY_ACTION_GUIDE = "http://ela.st/tier-capacity"; + public static final String TIER_CAPACITY_ACTION_GUIDE = "https://ela.st/tier-capacity"; public static final Diagnosis.Definition ACTION_INCREASE_NODE_CAPACITY = new Diagnosis.Definition( NAME, "increase_node_capacity_for_allocations", @@ -441,9 +452,7 @@ List diagnoseUnassignedShardRouting(ShardRouting shardRout LOGGER.trace("Diagnosing unassigned shard [{}] due to reason [{}]", shardRouting.shardId(), shardRouting.unassignedInfo()); switch (shardRouting.unassignedInfo().getLastAllocationStatus()) { case NO_VALID_SHARD_COPY: - if (UnassignedInfo.Reason.NODE_LEFT == shardRouting.unassignedInfo().getReason()) { - diagnosisDefs.add(ACTION_RESTORE_FROM_SNAPSHOT); - } + diagnosisDefs.add(ACTION_RESTORE_FROM_SNAPSHOT); break; case NO_ATTEMPT: if (shardRouting.unassignedInfo().isDelayed()) { @@ -762,7 +771,7 @@ private Optional checkNotEnoughNodesInDataTier( } } - private class ShardAllocationStatus { + class ShardAllocationStatus { private final ShardAllocationCounts primaries = new ShardAllocationCounts(); private final ShardAllocationCounts replicas = new ShardAllocationCounts(); private final Metadata clusterMetadata; @@ -893,9 +902,10 @@ public List getImpacts() { /** * Returns the diagnosis for unassigned primary and replica shards. * @param verbose true if the diagnosis should be generated, false if they should be omitted. + * @param maxAffectedResourcesCount the max number of affected resources to be returned as part of the diagnosis * @return The diagnoses list the indicator identified. Alternatively, an empty list if none were found or verbose is false. 
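To make the capped output concrete, this is roughly what a single entry could look like when the last copy of an index is gone and a snapshot restore is the suggested fix. The index name is invented; ACTION_RESTORE_FROM_SNAPSHOT and INDEX are the constants defined in this class, and at most maxAffectedResourcesCount values survive the limit applied below:

    Diagnosis diagnosis = new Diagnosis(
        ACTION_RESTORE_FROM_SNAPSHOT,
        List.of(new Diagnosis.Resource(INDEX, List.of("my-index"))) // truncated to maxAffectedResourcesCount entries
    );

Indices that belong to a feature state are reported as a FEATURE_STATE resource instead, which is what getRestoreFromSnapshotAffectedResources below takes care of.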
*/ - public List getDiagnosis(boolean verbose) { + public List getDiagnosis(boolean verbose, int maxAffectedResourcesCount) { if (verbose) { Map> diagnosisToAffectedIndices = new HashMap<>(primaries.diagnosisDefinitions); replicas.diagnosisDefinitions.forEach((diagnosisDef, indicesWithReplicasUnassigned) -> { @@ -909,27 +919,108 @@ public List getDiagnosis(boolean verbose) { if (diagnosisToAffectedIndices.isEmpty()) { return List.of(); } else { - return diagnosisToAffectedIndices.entrySet() - .stream() - .map( - e -> new Diagnosis( - e.getKey(), - List.of( - new Diagnosis.Resource( - INDEX, - e.getValue() - .stream() - .sorted(indicesComparatorByPriorityAndName(clusterMetadata)) - .collect(Collectors.toList()) - ) + + return diagnosisToAffectedIndices.entrySet().stream().map(e -> { + List affectedResources = new ArrayList<>(1); + if (e.getKey().equals(ACTION_RESTORE_FROM_SNAPSHOT)) { + Set restoreFromSnapshotIndices = e.getValue(); + if (restoreFromSnapshotIndices != null && restoreFromSnapshotIndices.isEmpty() == false) { + affectedResources = getRestoreFromSnapshotAffectedResources( + clusterMetadata, + systemIndices, + restoreFromSnapshotIndices, + maxAffectedResourcesCount + ); + } + } else { + affectedResources.add( + new Diagnosis.Resource( + INDEX, + e.getValue() + .stream() + .sorted(indicesComparatorByPriorityAndName(clusterMetadata)) + .limit(Math.min(e.getValue().size(), maxAffectedResourcesCount)) + .collect(Collectors.toList()) ) - ) - ) - .collect(Collectors.toList()); + ); + } + return new Diagnosis(e.getKey(), affectedResources); + }).collect(Collectors.toList()); } } else { return List.of(); } } + + /** + * The restore from snapshot operation requires the user to specify indices and feature states. + * The indices that are part of the feature states must not be specified. 
This method loops through all the + * identified unassigned indices and returns the affected {@link Diagnosis.Resource}s of type `INDEX` + * and if applicable `FEATURE_STATE` + */ + static List getRestoreFromSnapshotAffectedResources( + Metadata metadata, + SystemIndices systemIndices, + Set restoreFromSnapshotIndices, + int maxAffectedResourcesCount + ) { + List affectedResources = new ArrayList<>(2); + + Set affectedIndices = new HashSet<>(restoreFromSnapshotIndices); + Set affectedFeatureStates = new HashSet<>(); + Map> featureToSystemIndices = systemIndices.getFeatures() + .stream() + .collect( + toMap( + SystemIndices.Feature::getName, + feature -> feature.getIndexDescriptors() + .stream() + .flatMap(descriptor -> descriptor.getMatchingIndices(metadata).stream()) + .collect(toSet()) + ) + ); + + for (Map.Entry> featureToIndices : featureToSystemIndices.entrySet()) { + for (String featureIndex : featureToIndices.getValue()) { + if (restoreFromSnapshotIndices.contains(featureIndex)) { + affectedFeatureStates.add(featureToIndices.getKey()); + affectedIndices.remove(featureIndex); + } + } + } + + Map> featureToDsBackingIndices = systemIndices.getFeatures() + .stream() + .collect( + toMap( + SystemIndices.Feature::getName, + feature -> feature.getDataStreamDescriptors() + .stream() + .flatMap(descriptor -> descriptor.getBackingIndexNames(metadata).stream()) + .collect(toSet()) + ) + ); + + // the shards_availability indicator works with indices so let's remove the feature states data streams backing indices from + // the list of affected indices (the feature state will cover the restore of these indices too) + for (Map.Entry> featureToBackingIndices : featureToDsBackingIndices.entrySet()) { + for (String featureIndex : featureToBackingIndices.getValue()) { + if (restoreFromSnapshotIndices.contains(featureIndex)) { + affectedFeatureStates.add(featureToBackingIndices.getKey()); + affectedIndices.remove(featureIndex); + } + } + } + + if (affectedIndices.isEmpty() == false) { + affectedResources.add(new Diagnosis.Resource(INDEX, affectedIndices.stream().limit(maxAffectedResourcesCount).toList())); + } + if (affectedFeatureStates.isEmpty() == false) { + affectedResources.add( + new Diagnosis.Resource(FEATURE_STATE, affectedFeatureStates.stream().limit(maxAffectedResourcesCount).toList()) + ); + } + return affectedResources; + } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index aaaa4cd26b04..2aaecfdcda7a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -34,10 +34,13 @@ import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.Tuple; import 
org.elasticsearch.gateway.PriorityComparator; @@ -50,13 +53,12 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.OptionalLong; import java.util.Set; import java.util.function.BiFunction; -import java.util.function.Consumer; import java.util.stream.StreamSupport; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; +import static org.elasticsearch.common.settings.ClusterSettings.createBuiltInClusterSettings; /** * The {@link BalancedShardsAllocator} re-balances the nodes allocations @@ -100,7 +102,7 @@ public class BalancedShardsAllocator implements ShardsAllocator { ); public static final Setting DISK_USAGE_BALANCE_FACTOR_SETTING = Setting.floatSetting( "cluster.routing.allocation.balance.disk_usage", - 0.0f, + 2e-11f, 0.0f, Property.Dynamic, Property.NodeScope @@ -121,23 +123,49 @@ public class BalancedShardsAllocator implements ShardsAllocator { private final WriteLoadForecaster writeLoadForecaster; + public BalancedShardsAllocator() { + this(Settings.EMPTY); + } + public BalancedShardsAllocator(Settings settings) { - this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), WriteLoadForecaster.DEFAULT); + this(createBuiltInClusterSettings(settings), WriteLoadForecaster.DEFAULT); + } + + public BalancedShardsAllocator(ClusterSettings clusterSettings) { + this(clusterSettings, WriteLoadForecaster.DEFAULT); } @Inject - public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSettings, WriteLoadForecaster writeLoadForecaster) { - watchSetting(settings, clusterSettings, INDEX_BALANCE_FACTOR_SETTING, value -> this.indexBalanceFactor = value); - watchSetting(settings, clusterSettings, SHARD_BALANCE_FACTOR_SETTING, value -> this.shardBalanceFactor = value); - watchSetting(settings, clusterSettings, WRITE_LOAD_BALANCE_FACTOR_SETTING, value -> this.writeLoadBalanceFactor = value); - watchSetting(settings, clusterSettings, DISK_USAGE_BALANCE_FACTOR_SETTING, value -> this.diskUsageBalanceFactor = value); - watchSetting(settings, clusterSettings, THRESHOLD_SETTING, value -> this.threshold = value); + public BalancedShardsAllocator(ClusterSettings clusterSettings, WriteLoadForecaster writeLoadForecaster) { + clusterSettings.initializeAndWatch(INDEX_BALANCE_FACTOR_SETTING, value -> this.indexBalanceFactor = value); + clusterSettings.initializeAndWatch(SHARD_BALANCE_FACTOR_SETTING, value -> this.shardBalanceFactor = value); + clusterSettings.initializeAndWatch(WRITE_LOAD_BALANCE_FACTOR_SETTING, value -> this.writeLoadBalanceFactor = value); + clusterSettings.initializeAndWatch(DISK_USAGE_BALANCE_FACTOR_SETTING, value -> this.diskUsageBalanceFactor = value); + clusterSettings.initializeAndWatch(THRESHOLD_SETTING, value -> this.threshold = ensureValidThreshold(value)); this.writeLoadForecaster = writeLoadForecaster; } - private void watchSetting(Settings settings, ClusterSettings clusterSettings, Setting setting, Consumer consumer) { - consumer.accept(setting.get(settings)); - clusterSettings.addSettingsUpdateConsumer(setting, consumer); + /** + * Clamp threshold to be at least 1, and log a critical deprecation warning if smaller values are given. + * + * Once {@link org.elasticsearch.Version#V_7_17_0} goes out of scope, start to properly reject such bad values. 
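Two changes in this class are easy to miss in the noise: the disk-usage balance factor now defaults to 2e-11f instead of 0.0f (taken per byte of forecast shard data, that works out to about 1.0 weight unit per 50 GB), and threshold values below 1.0 are clamped rather than rejected. A hedged sketch of the clamping behaviour, assuming the threshold setting key used upstream and the public Settings-based constructor added above:

    // Illustrative only: a too-small threshold is accepted but clamped to 1.0f,
    // logging a critical deprecation warning via ensureValidThreshold below.
    Settings settings = Settings.builder()
        .put("cluster.routing.allocation.balance.threshold", 0.1f)
        .build();
    BalancedShardsAllocator allocator = new BalancedShardsAllocator(settings); // effective threshold: 1.0f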
+ */ + private static float ensureValidThreshold(float threshold) { + if (1.0f <= threshold) { + return threshold; + } else { + DeprecationLogger.getLogger(BalancedShardsAllocator.class) + .critical( + DeprecationCategory.SETTINGS, + "balance_threshold_too_small", + "ignoring value [{}] for [{}] since it is smaller than 1.0; " + + "setting [{}] to a value smaller than 1.0 will be forbidden in a future release", + threshold, + THRESHOLD_SETTING.getKey(), + THRESHOLD_SETTING.getKey() + ); + return 1.0f; + } } @Override @@ -292,26 +320,26 @@ float minWeightDelta(Balancer balancer, String index) { * A {@link Balancer} */ public static class Balancer { - private final Map nodes; private final WriteLoadForecaster writeLoadForecaster; private final RoutingAllocation allocation; private final RoutingNodes routingNodes; + private final Metadata metadata; private final WeightFunction weight; private final float threshold; - private final Metadata metadata; private final float avgShardsPerNode; private final double avgWriteLoadPerNode; private final double avgDiskUsageInBytesPerNode; + private final Map nodes; private final NodeSorter sorter; public Balancer(WriteLoadForecaster writeLoadForecaster, RoutingAllocation allocation, WeightFunction weight, float threshold) { this.writeLoadForecaster = writeLoadForecaster; this.allocation = allocation; - this.weight = weight; - this.threshold = threshold; this.routingNodes = allocation.routingNodes(); this.metadata = allocation.metadata(); + this.weight = weight; + this.threshold = threshold; avgShardsPerNode = ((float) metadata.getTotalNumberOfShards()) / routingNodes.size(); avgWriteLoadPerNode = getTotalWriteLoad(writeLoadForecaster, metadata) / routingNodes.size(); avgDiskUsageInBytesPerNode = ((double) getTotalDiskUsageInBytes(allocation.clusterInfo(), metadata) / routingNodes.size()); @@ -342,15 +370,10 @@ private static long getTotalDiskUsageInBytes(ClusterInfo clusterInfo, Metadata m // Visible for testing static long getIndexDiskUsageInBytes(ClusterInfo clusterInfo, IndexMetadata indexMetadata) { - OptionalLong forecastedShardSizeInBytes = indexMetadata.getForecastedShardSizeInBytes(); - final long indexDiskUsageInBytes; - if (forecastedShardSizeInBytes.isPresent()) { - int i = numberOfCopies(indexMetadata); - indexDiskUsageInBytes = forecastedShardSizeInBytes.getAsLong() * i; - } else { - indexDiskUsageInBytes = getIndexDiskUsageInBytesFromClusterInfo(clusterInfo, indexMetadata); - } - return indexDiskUsageInBytes; + var forecastedShardSizeInBytes = indexMetadata.getForecastedShardSizeInBytes(); + return forecastedShardSizeInBytes.isPresent() + ? forecastedShardSizeInBytes.getAsLong() * numberOfCopies(indexMetadata) + : getIndexDiskUsageInBytesFromClusterInfo(clusterInfo, indexMetadata); } private static long getIndexDiskUsageInBytesFromClusterInfo(ClusterInfo clusterInfo, IndexMetadata indexMetadata) { @@ -379,6 +402,10 @@ private static long getIndexDiskUsageInBytesFromClusterInfo(ClusterInfo clusterI return shardCount == 0 ? 
0 : (totalSizeInBytes / shardCount) * numberOfCopies(indexMetadata); } + private static long getShardDiskUsageInBytes(ShardRouting shardRouting, IndexMetadata indexMetadata, ClusterInfo clusterInfo) { + return indexMetadata.getForecastedShardSizeInBytes().orElseGet(() -> clusterInfo.getShardSize(shardRouting, 0L)); + } + private static int numberOfCopies(IndexMetadata indexMetadata) { return indexMetadata.getNumberOfShards() * (1 + indexMetadata.getNumberOfReplicas()); } @@ -387,6 +414,14 @@ private double getShardWriteLoad(String index) { return writeLoadForecaster.getForecastedWriteLoad(metadata.index(index)).orElse(0.0); } + private double diskUsageInBytesPerShard(String index) { + var indexMetadata = metadata.index(index); + var forecastedShardSizeInBytes = indexMetadata.getForecastedShardSizeInBytes(); + return forecastedShardSizeInBytes.isPresent() + ? forecastedShardSizeInBytes.getAsLong() + : (double) getIndexDiskUsageInBytesFromClusterInfo(allocation.clusterInfo(), indexMetadata) / numberOfCopies(indexMetadata); + } + /** * Returns an array view on the nodes in the balancer. Nodes should not be removed from this list. */ @@ -416,10 +451,6 @@ public double avgDiskUsageInBytesPerNode() { return avgDiskUsageInBytesPerNode; } - public double diskUsageInBytesPerShard(String index) { - return metadata.index(index).getForecastedShardSizeInBytes().orElse(0); - } - /** * Returns a new {@link NodeSorter} that sorts the nodes based on their * current weight with respect to the index passed to the sorter. The @@ -931,9 +962,9 @@ private Decision decideCanForceAllocateForVacate(ShardRouting shardRouting, Rout * process. In short, this method recreates the status-quo in the cluster. */ private Map buildModelFromAssigned() { - Map nodes = new HashMap<>(); + Map nodes = Maps.newMapWithExpectedSize(routingNodes.size()); for (RoutingNode rn : routingNodes) { - ModelNode node = new ModelNode(writeLoadForecaster, metadata, rn); + ModelNode node = new ModelNode(writeLoadForecaster, metadata, allocation.clusterInfo(), rn); nodes.put(rn.nodeId(), node); for (ShardRouting shard : rn) { assert rn.nodeId().equals(shard.currentNodeId()); @@ -1220,18 +1251,21 @@ private boolean tryRelocateShard(ModelNode minNode, ModelNode maxNode, String id } static class ModelNode implements Iterable { - private final Map indices = new HashMap<>(); private int numShards = 0; private double writeLoad = 0.0; private double diskUsageInBytes = 0.0; private final WriteLoadForecaster writeLoadForecaster; private final Metadata metadata; + private final ClusterInfo clusterInfo; private final RoutingNode routingNode; + private final Map indices; - ModelNode(WriteLoadForecaster writeLoadForecaster, Metadata metadata, RoutingNode routingNode) { + ModelNode(WriteLoadForecaster writeLoadForecaster, Metadata metadata, ClusterInfo clusterInfo, RoutingNode routingNode) { this.writeLoadForecaster = writeLoadForecaster; this.metadata = metadata; + this.clusterInfo = clusterInfo; this.routingNode = routingNode; + this.indices = Maps.newMapWithExpectedSize(routingNode.size() + 10);// some extra to account for shard movements } public ModelIndex getIndex(String indexName) { @@ -1275,7 +1309,7 @@ public void addShard(ShardRouting shard) { indices.computeIfAbsent(shard.getIndexName(), t -> new ModelIndex()).addShard(shard); IndexMetadata indexMetadata = metadata.index(shard.index()); writeLoad += writeLoadForecaster.getForecastedWriteLoad(indexMetadata).orElse(0.0); - diskUsageInBytes += indexMetadata.getForecastedShardSizeInBytes().orElse(0); 
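// A minimal sketch of the new per-shard accounting, using only the names already in scope in this method:
//   diskUsageInBytes += indexMetadata.getForecastedShardSizeInBytes().orElseGet(() -> clusterInfo.getShardSize(shard, 0L));
// i.e. a shard without a forecasted size is now counted at the size observed in ClusterInfo (default 0L) rather than always as 0 bytes.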
+ diskUsageInBytes += Balancer.getShardDiskUsageInBytes(shard, indexMetadata, clusterInfo); numShards++; } @@ -1289,7 +1323,7 @@ public void removeShard(ShardRouting shard) { } IndexMetadata indexMetadata = metadata.index(shard.index()); writeLoad -= writeLoadForecaster.getForecastedWriteLoad(indexMetadata).orElse(0.0); - diskUsageInBytes -= indexMetadata.getForecastedShardSizeInBytes().orElse(0); + diskUsageInBytes -= Balancer.getShardDiskUsageInBytes(shard, indexMetadata, clusterInfo); numShards--; } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStats.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStats.java new file mode 100644 index 000000000000..7021bd6554b4 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStats.java @@ -0,0 +1,222 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.routing.allocation.allocator; + +import org.elasticsearch.cluster.ClusterInfo; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.Maps; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.ToDoubleFunction; + +public record ClusterBalanceStats(Map tiers, Map nodes) + implements + Writeable, + ToXContentObject { + + public static ClusterBalanceStats EMPTY = new ClusterBalanceStats(Map.of(), Map.of()); + + public static ClusterBalanceStats createFrom( + ClusterState clusterState, + ClusterInfo clusterInfo, + WriteLoadForecaster writeLoadForecaster + ) { + var tierToNodeStats = new HashMap>(); + var nodes = new HashMap(); + for (RoutingNode routingNode : clusterState.getRoutingNodes()) { + var dataRoles = routingNode.node().getRoles().stream().filter(DiscoveryNodeRole::canContainData).toList(); + if (dataRoles.isEmpty()) { + continue; + } + var nodeStats = NodeBalanceStats.createFrom(routingNode, clusterState.metadata(), clusterInfo, writeLoadForecaster); + nodes.put(routingNode.node().getName(), nodeStats); + for (DiscoveryNodeRole role : dataRoles) { + tierToNodeStats.computeIfAbsent(role.roleName(), ignored -> new ArrayList<>()).add(nodeStats); + } + } + return new ClusterBalanceStats(Maps.transformValues(tierToNodeStats, TierBalanceStats::createFrom), nodes); + } + + public static ClusterBalanceStats readFrom(StreamInput in) throws IOException { + return new ClusterBalanceStats( + in.readImmutableMap(StreamInput::readString, 
TierBalanceStats::readFrom), + in.readImmutableMap(StreamInput::readString, NodeBalanceStats::readFrom) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(tiers, StreamOutput::writeString, StreamOutput::writeWriteable); + out.writeMap(nodes, StreamOutput::writeString, StreamOutput::writeWriteable); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject().field("tiers").map(tiers).field("nodes").map(nodes).endObject(); + } + + public record TierBalanceStats( + MetricStats shardCount, + MetricStats forecastWriteLoad, + MetricStats forecastShardSize, + MetricStats actualShardSize + ) implements Writeable, ToXContentObject { + + private static TierBalanceStats createFrom(List nodes) { + return new TierBalanceStats( + MetricStats.createFrom(nodes, it -> it.shards), + MetricStats.createFrom(nodes, it -> it.forecastWriteLoad), + MetricStats.createFrom(nodes, it -> it.forecastShardSize), + MetricStats.createFrom(nodes, it -> it.actualShardSize) + ); + } + + public static TierBalanceStats readFrom(StreamInput in) throws IOException { + return new TierBalanceStats( + MetricStats.readFrom(in), + MetricStats.readFrom(in), + MetricStats.readFrom(in), + MetricStats.readFrom(in) + ); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + shardCount.writeTo(out); + forecastWriteLoad.writeTo(out); + forecastShardSize.writeTo(out); + actualShardSize.writeTo(out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field("shard_count", shardCount) + .field("forecast_write_load", forecastWriteLoad) + .field("forecast_disk_usage", forecastShardSize) + .field("actual_disk_usage", actualShardSize) + .endObject(); + } + } + + public record MetricStats(double total, double min, double max, double average, double stdDev) implements Writeable, ToXContentObject { + + private static MetricStats createFrom(List nodes, ToDoubleFunction metricExtractor) { + assert nodes.isEmpty() == false : "Stats must be created from non empty nodes"; + double total = 0.0; + double total2 = 0.0; + double min = Double.POSITIVE_INFINITY; + double max = Double.NEGATIVE_INFINITY; + int count = 0; + for (NodeBalanceStats node : nodes) { + var metric = metricExtractor.applyAsDouble(node); + if (Double.isNaN(metric)) { + continue; + } + total += metric; + total2 += Math.pow(metric, 2); + min = Math.min(min, metric); + max = Math.max(max, metric); + count++; + } + double average = count == 0 ? Double.NaN : total / count; + double stdDev = count == 0 ? 
Double.NaN : Math.sqrt(total2 / count - Math.pow(average, 2)); + return new MetricStats(total, min, max, average, stdDev); + } + + public static MetricStats readFrom(StreamInput in) throws IOException { + return new MetricStats(in.readDouble(), in.readDouble(), in.readDouble(), in.readDouble(), in.readDouble()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeDouble(total); + out.writeDouble(min); + out.writeDouble(max); + out.writeDouble(average); + out.writeDouble(stdDev); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field("total", total) + .field("min", min) + .field("max", max) + .field("average", average) + .field("std_dev", stdDev) + .endObject(); + } + } + + public record NodeBalanceStats(int shards, double forecastWriteLoad, long forecastShardSize, long actualShardSize) + implements + Writeable, + ToXContentObject { + + private static NodeBalanceStats createFrom( + RoutingNode routingNode, + Metadata metadata, + ClusterInfo clusterInfo, + WriteLoadForecaster writeLoadForecaster + ) { + double forecastWriteLoad = 0.0; + long forecastShardSize = 0L; + long actualShardSize = 0L; + + for (ShardRouting shardRouting : routingNode) { + var indexMetadata = metadata.index(shardRouting.index()); + var shardSize = clusterInfo.getShardSize(shardRouting, 0L); + assert indexMetadata != null; + forecastWriteLoad += writeLoadForecaster.getForecastedWriteLoad(indexMetadata).orElse(0.0); + forecastShardSize += indexMetadata.getForecastedShardSizeInBytes().orElse(shardSize); + actualShardSize += shardSize; + } + + return new NodeBalanceStats(routingNode.size(), forecastWriteLoad, forecastShardSize, actualShardSize); + } + + public static NodeBalanceStats readFrom(StreamInput in) throws IOException { + return new NodeBalanceStats(in.readInt(), in.readDouble(), in.readLong(), in.readLong()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeInt(shards); + out.writeDouble(forecastWriteLoad); + out.writeLong(forecastShardSize); + out.writeLong(actualShardSize); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field("shard_count", shards) + .field("forecast_write_load", forecastWriteLoad) + .humanReadableField("forecast_disk_usage_bytes", "forecast_disk_usage", ByteSizeValue.ofBytes(forecastShardSize)) + .humanReadableField("actual_disk_usage_bytes", "actual_disk_usage", ByteSizeValue.ofBytes(actualShardSize)) + .endObject(); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java index 97a39321fba3..7603659a9680 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputer.java @@ -16,10 +16,15 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.common.metrics.MeanMetric; +import 
org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.core.Strings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; import java.util.HashMap; @@ -43,12 +48,25 @@ public class DesiredBalanceComputer { private static final Logger logger = LogManager.getLogger(DesiredBalanceComputer.class); + private final ThreadPool threadPool; private final ShardsAllocator delegateAllocator; protected final MeanMetric iterations = new MeanMetric(); - public DesiredBalanceComputer(ShardsAllocator delegateAllocator) { + public static final Setting PROGRESS_LOG_INTERVAL_SETTING = Setting.timeSetting( + "cluster.routing.allocation.desired_balance.progress_log_interval", + TimeValue.timeValueMinutes(1), + TimeValue.ZERO, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + private TimeValue progressLogInterval; + + public DesiredBalanceComputer(ClusterSettings clusterSettings, ThreadPool threadPool, ShardsAllocator delegateAllocator) { + this.threadPool = threadPool; this.delegateAllocator = delegateAllocator; + clusterSettings.initializeAndWatch(PROGRESS_LOG_INTERVAL_SETTING, value -> this.progressLogInterval = value); } public DesiredBalance compute( @@ -211,6 +229,11 @@ public DesiredBalance compute( } } + final int iterationCountReportInterval = computeIterationCountReportInterval(routingAllocation); + final long timeWarningInterval = progressLogInterval.millis(); + final long computationStartedTime = threadPool.relativeTimeInMillis(); + long nextReportTime = computationStartedTime + timeWarningInterval; + int i = 0; boolean hasChanges = false; while (true) { @@ -246,30 +269,45 @@ public DesiredBalance compute( } i++; + final int iterations = i; + final long currentTime = threadPool.relativeTimeInMillis(); + final boolean reportByTime = nextReportTime <= currentTime; + final boolean reportByIterationCount = i % iterationCountReportInterval == 0; + if (reportByTime || reportByIterationCount) { + nextReportTime = currentTime + timeWarningInterval; + } + if (hasChanges == false) { - logger.debug("Desired balance computation for [{}] converged after [{}] iterations", desiredBalanceInput.index(), i); + logger.debug( + "Desired balance computation for [{}] converged after [{}] and [{}] iterations", + desiredBalanceInput.index(), + TimeValue.timeValueMillis(currentTime - computationStartedTime).toString(), + i + ); break; } if (isFresh.test(desiredBalanceInput) == false) { // we run at least one iteration, but if another reroute happened meanwhile // then publish the interim state and restart the calculation - logger.debug(""" - Newer cluster state received after [{}] iterations, publishing incomplete desired balance for [{}] and restarting \ - computation - """, i, desiredBalanceInput.index()); - break; - } - if (i % 100 == 0) { - // TODO this warning should be time based, iteration count should be proportional to the number of shards - logger.log( - i % 1000000 == 0 ? Level.INFO : Level.DEBUG, - Strings.format( - "Desired balance computation for [%d] is still not converged after [%d] iterations", - desiredBalanceInput.index(), - i - ) + logger.debug( + "Desired balance computation for [{}] interrupted after [{}] and [{}] iterations as newer cluster state received. 
" + + "Publishing intermediate desired balance and restarting computation", + desiredBalanceInput.index(), + i, + TimeValue.timeValueMillis(currentTime - computationStartedTime).toString() ); + break; } + + logger.log( + reportByIterationCount || reportByTime ? Level.INFO : i % 100 == 0 ? Level.DEBUG : Level.TRACE, + () -> Strings.format( + "Desired balance computation for [%d] is still not converged after [%s] and [%d] iterations", + desiredBalanceInput.index(), + TimeValue.timeValueMillis(currentTime - computationStartedTime).toString(), + iterations + ) + ); } iterations.inc(i); @@ -347,4 +385,13 @@ private static UnassignedInfo discardAllocationStatus(UnassignedInfo info) { info.getLastAllocatedNodeId() ); } + + private static int computeIterationCountReportInterval(RoutingAllocation allocation) { + final int relativeSize = allocation.metadata().getTotalNumberOfShards(); + int iterations = 1000; + while (iterations < relativeSize && iterations < 1_000_000_000) { + iterations *= 10; + } + return iterations; + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java index 6bf0ed8eeba0..19a388964fea 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java @@ -11,6 +11,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.ArrayUtil; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNode; @@ -95,8 +98,11 @@ private boolean allocateUnassignedInvariant() { assert routingNodes.unassigned().isEmpty(); - final var shardCounts = allocation.metadata() - .stream() + final var shardCounts = allocation.metadata().stream().filter(indexMetadata -> + // skip any pre-7.2 closed indices which have no routing table entries at all + indexMetadata.getCreationVersion().onOrAfter(Version.V_7_2_0) + || indexMetadata.getState() == IndexMetadata.State.OPEN + || MetadataIndexStateService.isIndexVerifiedBeforeClosed(indexMetadata)) .flatMap( indexMetadata -> IntStream.range(0, indexMetadata.getNumberOfShards()) .mapToObj( @@ -151,7 +157,7 @@ private void failAllocationOfNewPrimaries(RoutingAllocation allocation) { private void allocateUnassigned() { RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned(); if (logger.isTraceEnabled()) { - logger.trace("Start allocating unassigned shards"); + logger.trace("Start allocating unassigned shards: {}", routingNodes.toString()); } if (unassigned.isEmpty()) { return; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java index 652fe0c468ec..f9e0a70555e8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java @@ -28,6 
+28,7 @@ import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.metrics.CounterMetric; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.threadpool.ThreadPool; @@ -77,12 +78,19 @@ public interface DesiredBalanceReconcilerAction { } public DesiredBalanceShardsAllocator( + ClusterSettings clusterSettings, ShardsAllocator delegateAllocator, ThreadPool threadPool, ClusterService clusterService, DesiredBalanceReconcilerAction reconciler ) { - this(delegateAllocator, threadPool, clusterService, new DesiredBalanceComputer(delegateAllocator), reconciler); + this( + delegateAllocator, + threadPool, + clusterService, + new DesiredBalanceComputer(clusterSettings, threadPool, delegateAllocator), + reconciler + ); } public DesiredBalanceShardsAllocator( @@ -211,6 +219,11 @@ protected void reconcile(DesiredBalance desiredBalance, RoutingAllocation alloca } allocationOrdering.retainNodes(getNodeIds(allocation.routingNodes())); recordTime(cumulativeReconciliationTime, new DesiredBalanceReconciler(desiredBalance, allocation, allocationOrdering)::run); + if (logger.isTraceEnabled()) { + logger.trace("Reconciled desired balance: {}", desiredBalance); + } else { + logger.debug("Reconciled desired balance for [{}]", desiredBalance.lastConvergedIndex()); + } } public DesiredBalance getDesiredBalance() { @@ -251,6 +264,11 @@ public void onFailure(Exception e) { assert MasterService.isPublishFailureException(e) : e; onNoLongerMaster(); } + + @Override + public String toString() { + return "ReconcileDesiredBalanceTask[lastConvergedIndex=" + desiredBalance.lastConvergedIndex() + "]"; + } } private final class ReconcileDesiredBalanceExecutor implements ClusterStateTaskExecutor { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java index ce7b24d8e60c..f0634fa508ce 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java @@ -12,7 +12,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -26,7 +26,7 @@ public record DesiredBalanceStats( long computationIterations, long cumulativeComputationTime, long cumulativeReconciliationTime -) implements Writeable, ToXContentFragment { +) implements Writeable, ToXContentObject { public static DesiredBalanceStats readFrom(StreamInput in) throws IOException { return new DesiredBalanceStats( @@ -55,7 +55,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - + builder.startObject(); builder.field("computation_active", computationActive); builder.field("computation_submitted", computationSubmitted); builder.field("computation_executed", computationExecuted); @@ -64,7 +64,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws 
builder.field("computation_converged_index", lastConvergedIndex); builder.humanReadableField("computation_time_in_millis", "computation_time", new TimeValue(cumulativeComputationTime)); builder.humanReadableField("reconciliation_time_in_millis", "reconciliation_time", new TimeValue(cumulativeReconciliationTime)); - + builder.endObject(); return builder; } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java index d80c51d9740f..3b8b30be9545 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java @@ -142,6 +142,14 @@ public Decision canAllocate(IndexMetadata indexMetadata, RoutingNode node, Routi Decision decision = allocationDecider.canAllocate(indexMetadata, node, allocation); // short track if a NO is returned. if (decision.type() == Decision.Type.NO) { + if (logger.isTraceEnabled()) { + logger.trace( + "Can not allocate [{}] on node [{}] due to [{}]", + indexMetadata.getIndex().getName(), + node.node(), + allocationDecider.getClass().getSimpleName() + ); + } if (allocation.debugDecision() == false) { return Decision.NO; } else { @@ -178,6 +186,13 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocat Decision decision = allocationDecider.canAllocate(shardRouting, allocation); // short track if a NO is returned. if (decision.type() == Decision.Type.NO) { + if (logger.isTraceEnabled()) { + logger.trace( + "Can not allocate [{}] on any node due to [{}]", + shardRouting, + allocationDecider.getClass().getSimpleName() + ); + } if (allocation.debugDecision() == false) { return Decision.NO; } else { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java index 9d22bbd20385..88d4a652a5a3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java @@ -16,7 +16,6 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; import java.util.Locale; @@ -89,10 +88,9 @@ public String toString() { private volatile ClusterRebalanceType type; - public ClusterRebalanceAllocationDecider(Settings settings, ClusterSettings clusterSettings) { - type = CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.get(settings); + public ClusterRebalanceAllocationDecider(ClusterSettings clusterSettings) { + clusterSettings.initializeAndWatch(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, this::setType); logger.debug("using [{}] with [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, type); - clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, this::setType); } private void setType(ClusterRebalanceType type) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java 
b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java index bd32f670e226..fd3ce510a105 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java @@ -15,7 +15,6 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; /** * Similar to the {@link ClusterRebalanceAllocationDecider} this @@ -44,13 +43,12 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider { ); private volatile int clusterConcurrentRebalance; - public ConcurrentRebalanceAllocationDecider(Settings settings, ClusterSettings clusterSettings) { - this.clusterConcurrentRebalance = CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.get(settings); - logger.debug("using [cluster_concurrent_rebalance] with [{}]", clusterConcurrentRebalance); - clusterSettings.addSettingsUpdateConsumer( + public ConcurrentRebalanceAllocationDecider(ClusterSettings clusterSettings) { + clusterSettings.initializeAndWatch( CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING, this::setClusterConcurrentRebalance ); + logger.debug("using [cluster_concurrent_rebalance] with [{}]", clusterConcurrentRebalance); } private void setClusterConcurrentRebalance(int concurrentRebalance) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java index 19b2e3d986eb..11624e39d176 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -84,11 +84,9 @@ public class EnableAllocationDecider extends AllocationDecider { private volatile Rebalance enableRebalance; private volatile Allocation enableAllocation; - public EnableAllocationDecider(Settings settings, ClusterSettings clusterSettings) { - this.enableAllocation = CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.get(settings); - this.enableRebalance = CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.get(settings); - clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, this::setEnableAllocation); - clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, this::setEnableRebalance); + public EnableAllocationDecider(ClusterSettings clusterSettings) { + clusterSettings.initializeAndWatch(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, this::setEnableAllocation); + clusterSettings.initializeAndWatch(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, this::setEnableRebalance); } private void setEnableRebalance(Rebalance enableRebalance) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java index d244c38b5913..f37039608d7b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/MaxRetryAllocationDecider.java @@ -34,6 +34,8 @@ public class MaxRetryAllocationDecider extends AllocationDecider { Setting.Property.NotCopyableOnResize ); + private static final String RETRY_FAILED_API = "POST /_cluster/reroute?retry_failed&metric=none"; + public static final String NAME = "max_retry"; private static final Decision YES_NO_FAILURES = Decision.single(Decision.Type.YES, NAME, "shard has no previous failures"); @@ -69,9 +71,9 @@ private static Decision debugDecision(Decision decision, UnassignedInfo info, in return Decision.single( Decision.Type.NO, NAME, - "shard has exceeded the maximum number of retries [%d] on failed allocation attempts - " - + "manually call [/_cluster/reroute?retry_failed=true] to retry, [%s]", + "shard has exceeded the maximum number of retries [%d] on failed allocation attempts - manually call [%s] to retry, [%s]", maxRetries, + RETRY_FAILED_API, info.toString() ); } else { @@ -90,9 +92,9 @@ private static Decision debugDecision(Decision decision, RelocationFailureInfo i return Decision.single( Decision.Type.NO, NAME, - "shard has exceeded the maximum number of retries [%d] on failed relocation attempts - " - + "manually call [/_cluster/reroute?retry_failed=true] to retry, [%s]", + "shard has exceeded the maximum number of retries [%d] on failed relocation attempts - manually call [%s] to retry, [%s]", maxRetries, + RETRY_FAILED_API, info.toString() ); } else { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java index 44ec04a3e1ae..7f34150f2674 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java @@ -16,7 +16,6 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; /** * An allocation decider that prevents multiple instances of the same shard to @@ -46,9 +45,8 @@ public class SameShardAllocationDecider extends AllocationDecider { private volatile boolean sameHost; - public SameShardAllocationDecider(Settings settings, ClusterSettings clusterSettings) { - this.sameHost = CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.get(settings); - clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING, this::setSameHost); + public SameShardAllocationDecider(ClusterSettings clusterSettings) { + clusterSettings.initializeAndWatch(CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING, this::setSameHost); } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java index e53688654e64..c43fd599df13 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java @@ -16,7 +16,6 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import 
org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; import java.util.function.BiPredicate; @@ -70,9 +69,8 @@ public class ShardsLimitAllocationDecider extends AllocationDecider { Property.NodeScope ); - public ShardsLimitAllocationDecider(Settings settings, ClusterSettings clusterSettings) { - this.clusterShardLimit = CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.get(settings); - clusterSettings.addSettingsUpdateConsumer(CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, this::setClusterShardLimit); + public ShardsLimitAllocationDecider(ClusterSettings clusterSettings) { + clusterSettings.initializeAndWatch(CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, this::setClusterShardLimit); } private void setClusterShardLimit(int clusterShardLimit) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index 3952330ba829..7011258e61a6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -18,7 +18,6 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; import static org.elasticsearch.cluster.routing.allocation.decider.Decision.THROTTLE; import static org.elasticsearch.cluster.routing.allocation.decider.Decision.YES; @@ -81,24 +80,19 @@ public class ThrottlingAllocationDecider extends AllocationDecider { private volatile int concurrentIncomingRecoveries; private volatile int concurrentOutgoingRecoveries; - public ThrottlingAllocationDecider(Settings settings, ClusterSettings clusterSettings) { - this.primariesInitialRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.get(settings); - concurrentIncomingRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.get(settings); - concurrentOutgoingRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.get(settings); - - clusterSettings.addSettingsUpdateConsumer( + public ThrottlingAllocationDecider(ClusterSettings clusterSettings) { + clusterSettings.initializeAndWatch( CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, this::setPrimariesInitialRecoveries ); - clusterSettings.addSettingsUpdateConsumer( + clusterSettings.initializeAndWatch( CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, this::setConcurrentIncomingRecoverries ); - clusterSettings.addSettingsUpdateConsumer( + clusterSettings.initializeAndWatch( CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING, this::setConcurrentOutgoingRecoverries ); - logger.debug( "using node_concurrent_outgoing_recoveries [{}], node_concurrent_incoming_recoveries [{}], " + "node_initial_primaries_recoveries [{}]", diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java index 49935668ed47..699e559a19cc 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java @@ -537,6 +537,7 @@ private 
static void callClusterStateAppliers( try (Releasable ignored = stopWatch.record(name)) { applier.applyClusterState(clusterChangedEvent); } + // TODO assert "ClusterStateApplier must not set response headers in the ClusterApplierService" } } @@ -560,6 +561,7 @@ private static void callClusterStateListener( } catch (Exception ex) { logger.warn("failed to notify ClusterStateListener", ex); } + // TODO assert "ClusterStateApplier must not set response headers in the ClusterStateListener" } } @@ -654,4 +656,9 @@ protected boolean applicationMayFail() { public ClusterApplierRecordingService.Stats getStats() { return recordingService.getStats(); } + + // Exposed only for testing + public int getTimeoutClusterStateListenersSize() { + return timeoutClusterStateListeners.size(); + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterStateUpdateStats.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterStateUpdateStats.java index 96f9bd5c084e..2efafb228bb5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterStateUpdateStats.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterStateUpdateStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.cluster.service; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -117,7 +117,7 @@ public ClusterStateUpdateStats(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - assert out.getVersion().onOrAfter(Version.V_7_16_0) : out.getVersion(); + assert out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0) : out.getTransportVersion(); out.writeVLong(unchangedTaskCount); out.writeVLong(publicationSuccessCount); out.writeVLong(publicationFailureCount); diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java index 06c4df9a87a3..17e78d6ca474 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java @@ -277,12 +277,6 @@ private void runTasks( previousClusterState, executeTasks(previousClusterState, executionResults, executor, summary, threadPool.getThreadContext()) ); - // fail all tasks that have failed - for (final var executionResult : executionResults) { - if (executionResult.failure != null) { - executionResult.updateTask.onFailure(executionResult.failure, executionResult::restoreResponseHeaders); - } - } final TimeValue computationTime = getTimeSince(computationStartTime); logExecutionTime(computationTime, "compute cluster state update", summary); @@ -950,7 +944,7 @@ void onBatchFailure(Exception failure) { void onPublishSuccess(ClusterState newClusterState) { if (publishedStateConsumer == null && onPublicationSuccess == null) { - assert failure != null; + notifyFailure(); return; } try (ThreadContext.StoredContext ignored = updateTask.threadContextSupplier.get()) { @@ -967,7 +961,7 @@ void onPublishSuccess(ClusterState newClusterState) { void onClusterStateUnchanged(ClusterState clusterState) { if (publishedStateConsumer == null && onPublicationSuccess == null) { - assert failure != null; + notifyFailure(); return; } try (ThreadContext.StoredContext ignored = updateTask.threadContextSupplier.get()) { @@ -985,6 +979,10 
@@ void onClusterStateUnchanged(ClusterState clusterState) { void onPublishFailure(FailedToCommitClusterStateException e) { if (publishedStateConsumer == null && onPublicationSuccess == null) { assert failure != null; + var taskFailure = failure; + failure = new FailedToCommitClusterStateException(e.getMessage(), e); + failure.addSuppressed(taskFailure); + notifyFailure(); return; } try (ThreadContext.StoredContext ignored = updateTask.threadContextSupplier.get()) { @@ -996,9 +994,14 @@ void onPublishFailure(FailedToCommitClusterStateException e) { } } + void notifyFailure() { + assert failure != null; + this.updateTask.onFailure(this.failure, this::restoreResponseHeaders); + } + ContextPreservingAckListener getContextPreservingAckListener() { assert incomplete() == false; - return updateTask.wrapInTaskContext(clusterStateAckListener, this::restoreResponseHeaders); + return failure == null ? updateTask.wrapInTaskContext(clusterStateAckListener, this::restoreResponseHeaders) : null; } @Override diff --git a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java new file mode 100644 index 000000000000..856ba30d6c4a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java @@ -0,0 +1,98 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common; + +import org.elasticsearch.Build; +import org.elasticsearch.Version; +import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.net.URL; +import java.util.LinkedHashMap; +import java.util.Map; + +/** + * Encapsulates links to pages in the reference docs, so that for example we can include URLs in logs and API outputs. Each instance's + * {@link #toString()} yields (a string representation of) a URL for the relevant docs. Links are defined in the resource file + * {@code reference-docs-links.json} which must include definitions for exactly the set of values of this enum. 
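+ * <p> + * For example, {@code ReferenceDocs.UNSTABLE_CLUSTER_TROUBLESHOOTING.toString()} yields a URL of the form + * {@code https://www.elastic.co/guide/en/elasticsearch/reference/<version>/<page>}, where {@code <version>} is the current + * {@code major.minor} (or {@code master} for a snapshot build of an unreleased {@code x.y.0} version) and {@code <page>} is + * whatever value {@code reference-docs-links.json} maps to {@code UNSTABLE_CLUSTER_TROUBLESHOOTING}.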
+ */ +public enum ReferenceDocs { + INITIAL_MASTER_NODES, + DISCOVERY_TROUBLESHOOTING, + UNSTABLE_CLUSTER_TROUBLESHOOTING, + LAGGING_NODE_TROUBLESHOOTING, + SHARD_LOCK_TROUBLESHOOTING, + CONCURRENT_REPOSITORY_WRITERS, + ARCHIVE_INDICES, + // this comment keeps the ';' on the next line so every entry above has a trailing ',' which makes the diff for adding new links cleaner + ; + + private static final Map linksBySymbol; + + static { + try (var resourceStream = readFromJarResourceUrl(ReferenceDocs.class.getResource("reference-docs-links.json"))) { + linksBySymbol = Map.copyOf(readLinksBySymbol(resourceStream)); + } catch (Exception e) { + assert false : e; + throw new IllegalStateException("could not read links resource", e); + } + } + + static final String UNRELEASED_VERSION_COMPONENT = "master"; + static final String VERSION_COMPONENT = getVersionComponent(Version.CURRENT, Build.CURRENT.isSnapshot()); + + static Map readLinksBySymbol(InputStream inputStream) throws Exception { + try (var parser = XContentFactory.xContent(XContentType.JSON).createParser(XContentParserConfiguration.EMPTY, inputStream)) { + final var result = parser.map(LinkedHashMap::new, XContentParser::text); + final var iterator = result.keySet().iterator(); + for (int i = 0; i < values().length; i++) { + final var expected = values()[i].name(); + if (iterator.hasNext() == false) { + throw new IllegalStateException("ran out of values at index " + i + ": expecting " + expected); + } + final var actual = iterator.next(); + if (actual.equals(expected) == false) { + throw new IllegalStateException("mismatch at index " + i + ": found " + actual + " but expected " + expected); + } + } + if (iterator.hasNext()) { + throw new IllegalStateException("found unexpected extra value: " + iterator.next()); + } + return result; + } + } + + /** + * Compute the version component of the URL path (e.g. {@code 8.5} or {@code master}) for a particular version of Elasticsearch. Exposed + * for testing, but all items use {@link #VERSION_COMPONENT} ({@code getVersionComponent(Version.CURRENT, Build.CURRENT.isSnapshot())}) + * which relates to the current version and build. + */ + static String getVersionComponent(Version version, boolean isSnapshot) { + return isSnapshot && version.revision == 0 ? UNRELEASED_VERSION_COMPONENT : version.major + "." 
+ version.minor; + } + + @Override + public String toString() { + return "https://www.elastic.co/guide/en/elasticsearch/reference/" + VERSION_COMPONENT + "/" + linksBySymbol.get(name()); + } + + @SuppressForbidden(reason = "reads resource from jar") + private static InputStream readFromJarResourceUrl(URL source) throws IOException { + if (source == null) { + throw new FileNotFoundException("links resource not found at [" + source + "]"); + } + return source.openStream(); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/Strings.java b/server/src/main/java/org/elasticsearch/common/Strings.java index 0fed68bdf732..d0dab040558b 100644 --- a/server/src/main/java/org/elasticsearch/common/Strings.java +++ b/server/src/main/java/org/elasticsearch/common/Strings.java @@ -756,7 +756,7 @@ public static String toString(ToXContent toXContent) { */ @Deprecated public static String toString(ChunkedToXContent chunkedToXContent) { - return toString(ChunkedToXContent.wrapAsXContentObject(chunkedToXContent)); + return toString(chunkedToXContent, false, false); } /** @@ -795,7 +795,7 @@ public static String toString(ToXContent toXContent, boolean pretty, boolean hum */ @Deprecated public static String toString(ChunkedToXContent chunkedToXContent, boolean pretty, boolean human) { - return toString(ChunkedToXContent.wrapAsXContentObject(chunkedToXContent), pretty, human); + return toString(ChunkedToXContent.wrapAsToXContent(chunkedToXContent), pretty, human); } /** @@ -917,4 +917,11 @@ public static String toLowercaseAscii(String in) { .collect(StringBuilder::new, StringBuilder::appendCodePoint, StringBuilder::append) .toString(); } + + /** + * Alias for {@link org.elasticsearch.core.Strings#format} + */ + public static String format(String format, Object... 
args) { + return org.elasticsearch.core.Strings.format(format, args); + } } diff --git a/server/src/main/java/org/elasticsearch/common/bytes/BytesArray.java b/server/src/main/java/org/elasticsearch/common/bytes/BytesArray.java index 4ae67e81ded9..8b48e871fec6 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/BytesArray.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/BytesArray.java @@ -137,6 +137,11 @@ public int getIntLE(int index) { return ByteUtils.readIntLE(bytes, offset + index); } + @Override + public long getLongLE(int index) { + return ByteUtils.readLongLE(bytes, offset + index); + } + @Override public double getDoubleLE(int index) { return ByteUtils.readDoubleLE(bytes, offset + index); diff --git a/server/src/main/java/org/elasticsearch/common/bytes/CompositeBytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/CompositeBytesReference.java index aa4ead0b6b90..9a75ebd0e60e 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/CompositeBytesReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/CompositeBytesReference.java @@ -238,6 +238,18 @@ public int getIntLE(int index) { return super.getIntLE(index); } + @Override + public long getLongLE(int index) { + int i = getOffsetIndex(index); + int idx = index - offsets[i]; + int end = idx + 8; + BytesReference wholeLongsLivesHere = references[i]; + if (end <= wholeLongsLivesHere.length()) { + return wholeLongsLivesHere.getLongLE(idx); + } + return super.getLongLE(index); + } + @Override public double getDoubleLE(int index) { int i = getOffsetIndex(index); diff --git a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java index a2629ffd0556..2f78ba70529c 100644 --- a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java +++ b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java @@ -8,9 +8,13 @@ package org.elasticsearch.common.collect; +import org.elasticsearch.core.Nullable; + +import java.util.Collections; import java.util.Iterator; import java.util.NoSuchElementException; import java.util.Objects; +import java.util.function.Function; public class Iterators { @@ -43,36 +47,33 @@ public static Iterator concat(Iterator... iterators) { throw new NullPointerException("iterators"); } - // explicit generic type argument needed for type inference - return new ConcatenatedIterator(iterators); + for (int i = 0; i < iterators.length; i++) { + if (iterators[i].hasNext()) { + // explicit generic type argument needed for type inference + return new ConcatenatedIterator(iterators, i); + } + } + + return Collections.emptyIterator(); } - static class ConcatenatedIterator implements Iterator { + private static class ConcatenatedIterator implements Iterator { private final Iterator[] iterators; - private int index = 0; + private int index; - @SafeVarargs - @SuppressWarnings("varargs") - ConcatenatedIterator(Iterator... 
iterators) { - if (iterators == null) { - throw new NullPointerException("iterators"); - } - for (int i = 0; i < iterators.length; i++) { + ConcatenatedIterator(Iterator[] iterators, int startIndex) { + for (int i = startIndex; i < iterators.length; i++) { if (iterators[i] == null) { throw new NullPointerException("iterators[" + i + "]"); } } this.iterators = iterators; + this.index = startIndex; } @Override public boolean hasNext() { - boolean hasNext = false; - while (index < iterators.length && (hasNext = iterators[index].hasNext()) == false) { - index++; - } - - return hasNext; + return index < iterators.length; } @Override @@ -80,7 +81,11 @@ public T next() { if (hasNext() == false) { throw new NoSuchElementException(); } - return iterators[index].next(); + final T value = iterators[index].next(); + while (index < iterators.length && iterators[index].hasNext() == false) { + index++; + } + return value; } } @@ -110,4 +115,54 @@ public T next() { return array[index++]; } } + + public static Iterator flatMap(Iterator input, Function> fn) { + while (input.hasNext()) { + final var value = fn.apply(input.next()); + if (value.hasNext()) { + return new FlatMapIterator<>(input, fn, value); + } + } + + return Collections.emptyIterator(); + } + + private static final class FlatMapIterator implements Iterator { + + private final Iterator input; + private final Function> fn; + + @Nullable // if finished, otherwise currentOutput.hasNext() is true + private Iterator currentOutput; + + FlatMapIterator(Iterator input, Function> fn, Iterator firstOutput) { + this.input = input; + this.fn = fn; + this.currentOutput = firstOutput; + } + + @Override + public boolean hasNext() { + return currentOutput != null; + } + + @Override + public U next() { + if (hasNext() == false) { + throw new NoSuchElementException(); + } + // noinspection ConstantConditions this is for documentation purposes + assert currentOutput != null && currentOutput.hasNext(); + final U value = currentOutput.next(); + while (currentOutput != null && currentOutput.hasNext() == false) { + if (input.hasNext()) { + currentOutput = fn.apply(input.next()); + } else { + currentOutput = null; + } + } + return value; + } + } + } diff --git a/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java b/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java index b6b69a9d4d22..2de39744e40a 100644 --- a/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java +++ b/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java @@ -9,7 +9,7 @@ package org.elasticsearch.common.compress; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.hash.MessageDigests; @@ -209,7 +209,7 @@ public String getSha256() { public static CompressedXContent readCompressedString(StreamInput in) throws IOException { final String sha256; final byte[] compressedData; - if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { sha256 = in.readString(); compressedData = in.readByteArray(); } else { @@ -221,7 +221,7 @@ public static CompressedXContent readCompressedString(StreamInput in) throws IOE } public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if 
(out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { out.writeString(sha256); } else { int crc32 = crc32FromCompressed(bytes); diff --git a/server/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java b/server/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java index b8879ec299b3..4ff54ded571b 100644 --- a/server/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java +++ b/server/src/main/java/org/elasticsearch/common/compress/DeflateCompressor.java @@ -10,9 +10,9 @@ import org.elasticsearch.Assertions; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Streams; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; diff --git a/server/src/main/java/org/elasticsearch/common/document/DocumentField.java b/server/src/main/java/org/elasticsearch/common/document/DocumentField.java index ec6f378c4c07..5828b485ce36 100644 --- a/server/src/main/java/org/elasticsearch/common/document/DocumentField.java +++ b/server/src/main/java/org/elasticsearch/common/document/DocumentField.java @@ -8,7 +8,7 @@ package org.elasticsearch.common.document; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -44,12 +44,12 @@ public class DocumentField implements Writeable, Iterable { public DocumentField(StreamInput in) throws IOException { name = in.readString(); values = in.readList(StreamInput::readGenericValue); - if (in.getVersion().onOrAfter(Version.V_7_16_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { ignoredValues = in.readList(StreamInput::readGenericValue); } else { ignoredValues = Collections.emptyList(); } - if (in.getVersion().onOrAfter(Version.V_8_2_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { lookupFields = in.readList(LookupField::new); } else { lookupFields = List.of(); @@ -114,10 +114,10 @@ public List getIgnoredValues() { public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeCollection(values, StreamOutput::writeGenericValue); - if (out.getVersion().onOrAfter(Version.V_7_16_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { out.writeCollection(ignoredValues, StreamOutput::writeGenericValue); } - if (out.getVersion().onOrAfter(Version.V_8_2_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { out.writeList(lookupFields); } else { if (lookupFields.isEmpty() == false) { diff --git a/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java b/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java index 93cb30eff3e2..9352c84db28f 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java +++ b/server/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java @@ -70,8 +70,6 @@ private MembersInjectorImpl createWithListeners(TypeLiteral type, Erro List injectors = getInjectors(injectionPoints, errors); errors.throwIfNewErrors(numErrorsBefore); - errors.throwIfNewErrors(numErrorsBefore); - return new MembersInjectorImpl<>(injector, type, injectors); } diff --git 
a/server/src/main/java/org/elasticsearch/common/io/Streams.java b/server/src/main/java/org/elasticsearch/common/io/Streams.java index 1089884ee0e0..fd16c80df1da 100644 --- a/server/src/main/java/org/elasticsearch/common/io/Streams.java +++ b/server/src/main/java/org/elasticsearch/common/io/Streams.java @@ -136,22 +136,6 @@ public static String copyToString(Reader in) throws IOException { return out.toString(); } - public static int readFully(InputStream reader, byte[] dest) throws IOException { - return readFully(reader, dest, 0, dest.length); - } - - public static int readFully(InputStream reader, byte[] dest, int offset, int len) throws IOException { - int read = 0; - while (read < len) { - final int r = reader.read(dest, offset + read, len - read); - if (r == -1) { - break; - } - read += r; - } - return read; - } - /** * Fully consumes the input stream, throwing the bytes away. Returns the number of bytes consumed. */ diff --git a/server/src/main/java/org/elasticsearch/common/io/UTF8StreamWriter.java b/server/src/main/java/org/elasticsearch/common/io/UTF8StreamWriter.java deleted file mode 100644 index d912db541f05..000000000000 --- a/server/src/main/java/org/elasticsearch/common/io/UTF8StreamWriter.java +++ /dev/null @@ -1,292 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.common.io; - -import java.io.CharConversionException; -import java.io.IOException; -import java.io.OutputStream; -import java.io.Writer; - -public final class UTF8StreamWriter extends Writer { - - /** - * Holds the current output stream or null if closed. - */ - private OutputStream _outputStream; - - /** - * Holds the bytes' buffer. - */ - private final byte[] _bytes; - - /** - * Holds the bytes buffer index. - */ - private int _index; - - /** - * Creates a UTF-8 writer having a byte buffer of moderate capacity (2048). - */ - public UTF8StreamWriter() { - _bytes = new byte[2048]; - } - - /** - * Creates a UTF-8 writer having a byte buffer of specified capacity. - * - * @param capacity the capacity of the byte buffer. - */ - public UTF8StreamWriter(int capacity) { - _bytes = new byte[capacity]; - } - - /** - * Sets the output stream to use for writing until this writer is closed. - * For example:[code] - * Writer writer = new UTF8StreamWriter().setOutputStream(out); - * [/code] is equivalent but writes faster than [code] - * Writer writer = new java.io.OutputStreamWriter(out, "UTF-8"); - * [/code] - * - * @param out the output stream. - * @return this UTF-8 writer. - * @throws IllegalStateException if this writer is being reused and - * it has not been {@link #close closed} or {@link #reset reset}. - */ - public UTF8StreamWriter setOutput(OutputStream out) { - if (_outputStream != null) throw new IllegalStateException("Writer not closed or reset"); - _outputStream = out; - return this; - } - - /** - * Writes a single character. This method supports 16-bits - * character surrogates. - * - * @param c char the character to be written (possibly - * a surrogate). - * @throws IOException if an I/O error occurs. - */ - public void write(char c) throws IOException { - if ((c < 0xd800) || (c > 0xdfff)) { - write((int) c); - } else if (c < 0xdc00) { // High surrogate. 
- _highSurrogate = c; - } else { // Low surrogate. - int code = ((_highSurrogate - 0xd800) << 10) + (c - 0xdc00) + 0x10000; - write(code); - } - } - - private char _highSurrogate; - - /** - * Writes a character given its 31-bits Unicode. - * - * @param code the 31 bits Unicode of the character to be written. - * @throws IOException if an I/O error occurs. - */ - @Override - public void write(int code) throws IOException { - if ((code & 0xffffff80) == 0) { - _bytes[_index] = (byte) code; - if (++_index >= _bytes.length) { - flushBuffer(); - } - } else { // Writes more than one byte. - write2(code); - } - } - - private void write2(int c) throws IOException { - if ((c & 0xfffff800) == 0) { // 2 bytes. - _bytes[_index] = (byte) (0xc0 | (c >> 6)); - if (++_index >= _bytes.length) { - flushBuffer(); - } - _bytes[_index] = (byte) (0x80 | (c & 0x3f)); - if (++_index >= _bytes.length) { - flushBuffer(); - } - } else if ((c & 0xffff0000) == 0) { // 3 bytes. - _bytes[_index] = (byte) (0xe0 | (c >> 12)); - if (++_index >= _bytes.length) { - flushBuffer(); - } - _bytes[_index] = (byte) (0x80 | ((c >> 6) & 0x3f)); - if (++_index >= _bytes.length) { - flushBuffer(); - } - _bytes[_index] = (byte) (0x80 | (c & 0x3f)); - if (++_index >= _bytes.length) { - flushBuffer(); - } - } else if ((c & 0xff200000) == 0) { // 4 bytes. - _bytes[_index] = (byte) (0xf0 | (c >> 18)); - if (++_index >= _bytes.length) { - flushBuffer(); - } - _bytes[_index] = (byte) (0x80 | ((c >> 12) & 0x3f)); - if (++_index >= _bytes.length) { - flushBuffer(); - } - _bytes[_index] = (byte) (0x80 | ((c >> 6) & 0x3f)); - if (++_index >= _bytes.length) { - flushBuffer(); - } - _bytes[_index] = (byte) (0x80 | (c & 0x3f)); - if (++_index >= _bytes.length) { - flushBuffer(); - } - } else if ((c & 0xf4000000) == 0) { // 5 bytes. - _bytes[_index] = (byte) (0xf8 | (c >> 24)); - if (++_index >= _bytes.length) { - flushBuffer(); - } - _bytes[_index] = (byte) (0x80 | ((c >> 18) & 0x3f)); - if (++_index >= _bytes.length) { - flushBuffer(); - } - _bytes[_index] = (byte) (0x80 | ((c >> 12) & 0x3f)); - if (++_index >= _bytes.length) { - flushBuffer(); - } - _bytes[_index] = (byte) (0x80 | ((c >> 6) & 0x3f)); - if (++_index >= _bytes.length) { - flushBuffer(); - } - _bytes[_index] = (byte) (0x80 | (c & 0x3f)); - if (++_index >= _bytes.length) { - flushBuffer(); - } - } else if ((c & 0x80000000) == 0) { // 6 bytes. - _bytes[_index] = (byte) (0xfc | (c >> 30)); - if (++_index >= _bytes.length) { - flushBuffer(); - } - _bytes[_index] = (byte) (0x80 | ((c >> 24) & 0x3f)); - if (++_index >= _bytes.length) { - flushBuffer(); - } - _bytes[_index] = (byte) (0x80 | ((c >> 18) & 0x3f)); - if (++_index >= _bytes.length) { - flushBuffer(); - } - _bytes[_index] = (byte) (0x80 | ((c >> 12) & 0x3F)); - if (++_index >= _bytes.length) { - flushBuffer(); - } - _bytes[_index] = (byte) (0x80 | ((c >> 6) & 0x3F)); - if (++_index >= _bytes.length) { - flushBuffer(); - } - _bytes[_index] = (byte) (0x80 | (c & 0x3F)); - if (++_index >= _bytes.length) { - flushBuffer(); - } - } else { - throw new CharConversionException("Illegal character U+" + Integer.toHexString(c)); - } - } - - /** - * Writes a portion of an array of characters. - * - * @param cbuf the array of characters. - * @param off the offset from which to start writing characters. - * @param len the number of characters to write. - * @throws IOException if an I/O error occurs. 
- */ - @Override - public void write(char cbuf[], int off, int len) throws IOException { - final int off_plus_len = off + len; - for (int i = off; i < off_plus_len;) { - char c = cbuf[i++]; - if (c < 0x80) { - _bytes[_index] = (byte) c; - if (++_index >= _bytes.length) { - flushBuffer(); - } - } else { - write(c); - } - } - } - - /** - * Writes a portion of a string. - * - * @param str a String. - * @param off the offset from which to start writing characters. - * @param len the number of characters to write. - * @throws IOException if an I/O error occurs - */ - @Override - public void write(String str, int off, int len) throws IOException { - final int off_plus_len = off + len; - for (int i = off; i < off_plus_len;) { - char c = str.charAt(i++); - if (c < 0x80) { - _bytes[_index] = (byte) c; - if (++_index >= _bytes.length) { - flushBuffer(); - } - } else { - write(c); - } - } - } - - /** - * Flushes the stream. If the stream has saved any characters from the - * various write() methods in a buffer, write them immediately to their - * intended destination. Then, if that destination is another character or - * byte stream, flush it. Thus one flush() invocation will flush all the - * buffers in a chain of Writers and OutputStreams. - * - * @throws IOException if an I/O error occurs. - */ - @Override - public void flush() throws IOException { - flushBuffer(); - _outputStream.flush(); - } - - /** - * Closes and {@link #reset resets} this writer for reuse. - * - * @throws IOException if an I/O error occurs - */ - @Override - public void close() throws IOException { - if (_outputStream != null) { - flushBuffer(); - _outputStream.close(); - reset(); - } - } - - /** - * Flushes the internal bytes buffer. - * - * @throws IOException if an I/O error occurs - */ - private void flushBuffer() throws IOException { - if (_outputStream == null) throw new IOException("Stream closed"); - _outputStream.write(_bytes, 0, _index); - _index = 0; - } - - // Implements Reusable. - public void reset() { - _highSurrogate = 0; - _index = 0; - _outputStream = null; - } -} diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/DelayableWriteable.java b/server/src/main/java/org/elasticsearch/common/io/stream/DelayableWriteable.java index 5c637f27dfd1..8ec408c2b08c 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/DelayableWriteable.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/DelayableWriteable.java @@ -8,7 +8,7 @@ package org.elasticsearch.common.io.stream; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.core.Releasable; @@ -49,12 +49,12 @@ public static DelayableWriteable referencing(T referenc * when {@link #expand()} is called. 
*/ public static DelayableWriteable delayed(Writeable.Reader reader, StreamInput in) throws IOException { - return new Serialized<>(reader, in.getVersion(), in.namedWriteableRegistry(), in.readReleasableBytesReference()); + return new Serialized<>(reader, in.getTransportVersion(), in.namedWriteableRegistry(), in.readReleasableBytesReference()); } public static DelayableWriteable referencing(Writeable.Reader reader, StreamInput in) throws IOException { try (ReleasableBytesReference serialized = in.readReleasableBytesReference()) { - return new Referencing<>(deserialize(reader, in.getVersion(), in.namedWriteableRegistry(), serialized)); + return new Referencing<>(deserialize(reader, in.getTransportVersion(), in.namedWriteableRegistry(), serialized)); } } @@ -103,12 +103,12 @@ public T expand() { public Serialized asSerialized(Reader reader, NamedWriteableRegistry registry) { BytesStreamOutput buffer; try { - buffer = writeToBuffer(Version.CURRENT); + buffer = writeToBuffer(TransportVersion.CURRENT); } catch (IOException e) { throw new RuntimeException("unexpected error writing writeable to buffer", e); } // TODO: this path is currently not used in production code, if it ever is this should start using pooled buffers - return new Serialized<>(reader, Version.CURRENT, registry, ReleasableBytesReference.wrap(buffer.bytes())); + return new Serialized<>(reader, TransportVersion.CURRENT, registry, ReleasableBytesReference.wrap(buffer.bytes())); } @Override @@ -121,9 +121,9 @@ public long getSerializedSize() { return DelayableWriteable.getSerializedSize(reference); } - private BytesStreamOutput writeToBuffer(Version version) throws IOException { + private BytesStreamOutput writeToBuffer(TransportVersion version) throws IOException { try (BytesStreamOutput buffer = new BytesStreamOutput()) { - buffer.setVersion(version); + buffer.setTransportVersion(version); reference.writeTo(buffer); return buffer; } @@ -141,13 +141,13 @@ public void close() { */ public static class Serialized extends DelayableWriteable { private final Writeable.Reader reader; - private final Version serializedAtVersion; + private final TransportVersion serializedAtVersion; private final NamedWriteableRegistry registry; private final ReleasableBytesReference serialized; private Serialized( Writeable.Reader reader, - Version serializedAtVersion, + TransportVersion serializedAtVersion, NamedWriteableRegistry registry, ReleasableBytesReference serialized ) { @@ -159,7 +159,7 @@ private Serialized( @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion() == serializedAtVersion) { + if (out.getTransportVersion() == serializedAtVersion) { /* * If the version *does* line up we can just copy the bytes * which is good because this is how shard request caching @@ -214,7 +214,7 @@ public void close() { */ public static long getSerializedSize(Writeable ref) { try (CountingStreamOutput out = new CountingStreamOutput()) { - out.setVersion(Version.CURRENT); + out.setTransportVersion(TransportVersion.CURRENT); ref.writeTo(out); return out.size; } catch (IOException exc) { @@ -224,7 +224,7 @@ public static long getSerializedSize(Writeable ref) { private static T deserialize( Reader reader, - Version serializedAtVersion, + TransportVersion serializedAtVersion, NamedWriteableRegistry registry, BytesReference serialized ) throws IOException { @@ -233,7 +233,7 @@ private static T deserialize( ? 
serialized.streamInput() : new NamedWriteableAwareStreamInput(serialized.streamInput(), registry) ) { - in.setVersion(serializedAtVersion); + in.setTransportVersion(serializedAtVersion); return reader.read(in); } } diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java index 79fa2995146b..beed68a6ab5b 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/FilterStreamInput.java @@ -8,6 +8,7 @@ package org.elasticsearch.common.io.stream; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.common.bytes.ReleasableBytesReference; @@ -86,13 +87,29 @@ public int available() throws IOException { } @Override + @Deprecated(forRemoval = true) public Version getVersion() { return delegate.getVersion(); } @Override + public TransportVersion getTransportVersion() { + return delegate.getTransportVersion(); + } + + @Override + @Deprecated(forRemoval = true) public void setVersion(Version version) { delegate.setVersion(version); + // also set the version on this stream directly, so that any uses of this.version are still correct + super.setVersion(version); + } + + @Override + public void setTransportVersion(TransportVersion version) { + delegate.setTransportVersion(version); + // also set the version on this stream directly, so that any uses of this.version are still correct + super.setTransportVersion(version); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/InputStreamStreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/InputStreamStreamInput.java index 402397e22ced..439900122c83 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/InputStreamStreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/InputStreamStreamInput.java @@ -8,7 +8,7 @@ package org.elasticsearch.common.io.stream; -import org.elasticsearch.common.io.Streams; +import org.elasticsearch.core.Streams; import java.io.EOFException; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutput.java index 7c372f6b52bf..4ebebdbd8e9b 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutput.java @@ -127,7 +127,7 @@ public void writeWithSizePrefix(Writeable writeable) throws IOException { // manipulation of the offsets on the pages after writing to tmp. 
This will require adjustments to the places in this class // that make assumptions about the page size try (RecyclerBytesStreamOutput tmp = new RecyclerBytesStreamOutput(recycler)) { - tmp.setVersion(getVersion()); + tmp.setTransportVersion(getTransportVersion()); writeable.writeTo(tmp); int size = tmp.size(); writeVInt(size); diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 05753b46fa12..faea2ad8bc86 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -17,6 +17,7 @@ import org.apache.lucene.util.BitUtil; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -83,19 +84,35 @@ */ public abstract class StreamInput extends InputStream { - private Version version = Version.CURRENT; + private TransportVersion version = TransportVersion.CURRENT; /** * The version of the node on the other side of this stream. */ + @Deprecated(forRemoval = true) public Version getVersion() { + return Version.fromId(this.version.id); + } + + /** + * The transport version the data is serialized as. + */ + public TransportVersion getTransportVersion() { return this.version; } /** * Set the version of the node on the other side of this stream. */ + @Deprecated(forRemoval = true) public void setVersion(Version version) { + this.version = version.transportVersion; + } + + /** + * Set the transport version of the data in this stream. + */ + public void setTransportVersion(TransportVersion version) { this.version = version; } @@ -599,6 +616,20 @@ public String[] readOptionalStringArray() throws IOException { return null; } + /** + * Reads an optional byte array. It's effectively the same as readByteArray, except + * it supports null. + * @return a byte array or null + * @throws IOException + */ + @Nullable + public byte[] readOptionalByteArray() throws IOException { + if (readBoolean()) { + return readByteArray(); + } + return null; + } + /** * If the returned map contains any entries it will be mutable. If it is empty it might be immutable. */ @@ -738,8 +769,12 @@ public Object readGenericValue() throws IOException { case 6 -> readByteArray(); case 7 -> readArrayList(); case 8 -> readArray(); - case 9 -> readLinkedHashMap(); - case 10 -> readHashMap(); + case 9 -> getTransportVersion().onOrAfter(TransportVersion.V_8_7_0) + ? readOrderedMap(StreamInput::readGenericValue, StreamInput::readGenericValue) + : readOrderedMap(StreamInput::readString, StreamInput::readGenericValue); + case 10 -> getTransportVersion().onOrAfter(TransportVersion.V_8_7_0) + ? 
readMap(StreamInput::readGenericValue, StreamInput::readGenericValue) + : readMap(StreamInput::readString, StreamInput::readGenericValue); case 11 -> readByte(); case 12 -> readDate(); case 13 -> @@ -817,30 +852,6 @@ private Object[] readArray() throws IOException { return list8; } - private Map readLinkedHashMap() throws IOException { - int size9 = readArraySize(); - if (size9 == 0) { - return Collections.emptyMap(); - } - Map map9 = Maps.newLinkedHashMapWithExpectedSize(size9); - for (int i = 0; i < size9; i++) { - map9.put(readString(), readGenericValue()); - } - return map9; - } - - private Map readHashMap() throws IOException { - int size10 = readArraySize(); - if (size10 == 0) { - return Collections.emptyMap(); - } - Map map10 = Maps.newMapWithExpectedSize(size10); - for (int i = 0; i < size10; i++) { - map10.put(readString(), readGenericValue()); - } - return map10; - } - private Date readDate() throws IOException { return new Date(readLong()); } diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 2465d211ef95..19b792f33254 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -17,6 +17,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -77,19 +78,35 @@ public abstract class StreamOutput extends OutputStream { private static final int MAX_NESTED_EXCEPTION_LEVEL = 100; - private Version version = Version.CURRENT; + private TransportVersion version = TransportVersion.CURRENT; /** * The version of the node on the other side of this stream. */ + @Deprecated(forRemoval = true) public Version getVersion() { + return Version.fromId(this.version.id); + } + + /** + * The transport version to serialize the data as. + */ + public TransportVersion getTransportVersion() { return this.version; } /** * Set the version of the node on the other side of this stream. */ + @Deprecated(forRemoval = true) public void setVersion(Version version) { + this.version = version.transportVersion; + } + + /** + * Set the transport version of the data in this stream. + */ + public void setTransportVersion(TransportVersion version) { this.version = version; } @@ -147,7 +164,7 @@ public void writeByteArray(byte[] b) throws IOException { */ public void writeWithSizePrefix(Writeable writeable) throws IOException { final BytesStreamOutput tmp = new BytesStreamOutput(); - tmp.setVersion(version); + tmp.setTransportVersion(version); writeable.writeTo(tmp); writeBytesReference(tmp.bytes()); } @@ -537,6 +554,19 @@ public void writeOptionalStringArray(@Nullable String[] array) throws IOExceptio } } + /** + * Writes a byte array, for null arrays it writes false. 
+ * @param array an array or null + */ + public void writeOptionalByteArray(@Nullable byte[] array) throws IOException { + if (array == null) { + writeBoolean(false); + } else { + writeBoolean(true); + writeByteArray(array); + } + } + public void writeGenericMap(@Nullable Map map) throws IOException { writeGenericValue(map); } @@ -561,7 +591,11 @@ public void writeMapWithConsistentOrder(@Nullable Map .iterator(); while (iterator.hasNext()) { Map.Entry next = iterator.next(); - this.writeString(next.getKey()); + if (this.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + this.writeGenericValue(next.getKey()); + } else { + this.writeString(next.getKey()); + } this.writeGenericValue(next.getValue()); } } @@ -688,12 +722,13 @@ public final void writeOptionalInstant(@Nullable Instant instant) throws IOExcep } else { o.writeByte((byte) 10); } - @SuppressWarnings("unchecked") - final Map map = (Map) v; - o.writeVInt(map.size()); - for (Map.Entry entry : map.entrySet()) { - o.writeString(entry.getKey()); - o.writeGenericValue(entry.getValue()); + if (o.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + final Map map = (Map) v; + o.writeMap(map, StreamOutput::writeGenericValue, StreamOutput::writeGenericValue); + } else { + @SuppressWarnings("unchecked") + final Map map = (Map) v; + o.writeMap(map, StreamOutput::writeString, StreamOutput::writeGenericValue); } }), entry(Byte.class, (o, v) -> { @@ -956,6 +991,14 @@ public void writeOptionalWriteable(@Nullable Writeable writeable) throws IOExcep } } + /** + * This method allow to use a method reference when writing collection elements such as + * {@code out.writeMap(map, StreamOutput::writeString, StreamOutput::writeWriteable)} + */ + public void writeWriteable(Writeable writeable) throws IOException { + writeable.writeTo(this); + } + public void writeException(Throwable throwable) throws IOException { writeException(throwable, throwable, 0); } diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutput.java index a686f35f394a..78cb36e9a6c9 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutput.java @@ -62,14 +62,14 @@ public void writeOptionalNamedWriteable(@Nullable NamedWriteable namedWriteable) } private void checkVersionCompatibility(VersionedNamedWriteable namedWriteable) { - if (namedWriteable.getMinimalSupportedVersion().after(getVersion())) { + if (namedWriteable.getMinimalSupportedVersion().after(getTransportVersion())) { throw new IllegalArgumentException( "[" + namedWriteable.getWriteableName() + "] was released first in version " + namedWriteable.getMinimalSupportedVersion() + ", failed compatibility check trying to send it to node with version " - + getVersion() + + getTransportVersion() ); } } diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/VersionedNamedWriteable.java b/server/src/main/java/org/elasticsearch/common/io/stream/VersionedNamedWriteable.java index 3b0e971225aa..1b25b0969488 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/VersionedNamedWriteable.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/VersionedNamedWriteable.java @@ -8,7 +8,7 @@ package org.elasticsearch.common.io.stream; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import 
java.io.IOException; import java.util.Map; @@ -26,7 +26,7 @@ public interface VersionedNamedWriteable extends NamedWriteable { /** * The minimal version of the recipient this object can be sent to */ - Version getMinimalSupportedVersion(); + TransportVersion getMinimalSupportedVersion(); /** * Tests whether or not the custom should be serialized. The criteria is the output stream must be at least the minimum supported @@ -39,7 +39,7 @@ public interface VersionedNamedWriteable extends NamedWriteable { * @return true if the custom should be serialized and false otherwise */ static boolean shouldSerialize(final StreamOutput out, final T custom) { - return out.getVersion().onOrAfter(custom.getMinimalSupportedVersion()); + return out.getTransportVersion().onOrAfter(custom.getMinimalSupportedVersion()); } /** diff --git a/server/src/main/java/org/elasticsearch/common/logging/ChunkedLoggingStream.java b/server/src/main/java/org/elasticsearch/common/logging/ChunkedLoggingStream.java new file mode 100644 index 000000000000..941a6d1f9eb3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/logging/ChunkedLoggingStream.java @@ -0,0 +1,148 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.ReferenceDocs; +import org.elasticsearch.common.unit.ByteSizeUnit; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.Objects; +import java.util.zip.GZIPOutputStream; + +/** + * An {@link OutputStream} which Gzip-compresses the written data, Base64-encodes it, and writes it in fixed-size chunks to a logger. This + * is useful for debugging information that may be too large for a single log message and/or which may include data which cannot be + * recorded faithfully in plain-text (e.g. binary data or data with significant whitespace). + */ +public class ChunkedLoggingStream extends OutputStream { + + static final int CHUNK_SIZE = ByteSizeUnit.KB.toIntBytes(2); + + /** + * Create an {@link OutputStream} which Gzip-compresses the written data, Base64-encodes it, and writes it in fixed-size (2kiB) chunks + * to the given logger. If the data fits into a single chunk then the output looks like this: + * + *
+     * $PREFIX (gzip compressed and base64-encoded; for details see ...): H4sIAAAAA...
+     * </pre>
+     *
+     * If there are multiple chunks then they are written like this:
+     *
+     * <pre>
+     * $PREFIX [part 1]: H4sIAAAAA...
+     * $PREFIX [part 2]: r38c4MBHO...
+     * $PREFIX [part 3]: ECyRFONaL...
+     * $PREFIX [part 4]: kTgm+Qswm...
+     * $PREFIX (gzip compressed, base64-encoded, and split into 4 parts on preceding log lines; for details see ...)
+     * </pre>
+ * + * @param logger The logger to receive the chunks of data. + * @param level The log level to use for the logging. + * @param prefix A prefix for each chunk, which should be reasonably unique to allow for reconstruction of the original message + * even if multiple such streams are used concurrently. + * @param referenceDocs A link to the relevant reference docs to help users interpret the output. Relevant reference docs are required + * because the output is rather human-unfriendly and we need somewhere to describe how to decode it. + */ + public static OutputStream create(Logger logger, Level level, String prefix, ReferenceDocs referenceDocs) throws IOException { + return new GZIPOutputStream(Base64.getEncoder().wrap(new ChunkedLoggingStream(logger, level, prefix, referenceDocs))); + } + + private final Logger logger; + private final Level level; + private final String prefix; + private final ReferenceDocs referenceDocs; + + private int chunk; + private int offset; + private boolean closed; + private final byte[] buffer = new byte[CHUNK_SIZE]; + + ChunkedLoggingStream(Logger logger, Level level, String prefix, ReferenceDocs referenceDocs) { + this.logger = Objects.requireNonNull(logger); + this.level = Objects.requireNonNull(level); + this.prefix = Objects.requireNonNull(prefix); + this.referenceDocs = Objects.requireNonNull(referenceDocs); + } + + private void flushBuffer() { + assert closed || offset == CHUNK_SIZE : offset; + assert offset >= 0 && offset <= CHUNK_SIZE : offset; + chunk += 1; + + final var chunkString = new String(buffer, 0, offset, StandardCharsets.ISO_8859_1); + offset = 0; + + if (closed && chunk == 1) { + logger.log(level, "{} (gzip compressed and base64-encoded; for details see {}): {}", prefix, referenceDocs, chunkString); + } else { + logger.log(level, "{} [part {}]: {}", prefix, chunk, chunkString); + } + } + + @Override + public void write(int b) throws IOException { + assert closed == false; + if (offset == CHUNK_SIZE) { + flushBuffer(); + } + buffer[offset] = (byte) b; + assert assertSafeByte(buffer[offset]); + offset += 1; + } + + @Override + public void write(byte[] b, int off, int len) throws IOException { + assert closed == false; + assert assertSafeBytes(b, off, len); + while (len > 0) { + if (offset == CHUNK_SIZE) { + flushBuffer(); + } + var copyLen = Math.min(len, CHUNK_SIZE - offset); + System.arraycopy(b, off, buffer, offset, copyLen); + offset += copyLen; + off += copyLen; + len -= copyLen; + } + } + + @Override + public void close() throws IOException { + if (closed == false) { + closed = true; + flushBuffer(); + if (chunk > 1) { + logger.log( + level, + "{} (gzip compressed, base64-encoded, and split into {} parts on preceding log lines; for details see {})", + prefix, + chunk, + referenceDocs + ); + } + } + } + + private static boolean assertSafeBytes(byte[] b, int off, int len) { + for (int i = off; i < off + len; i++) { + assertSafeByte(b[i]); + } + return true; + } + + private static boolean assertSafeByte(byte b) { + assert 0x20 <= b && b < 0x7f; + return true; + } +} diff --git a/server/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java b/server/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java index 2147b7a5531b..55c1f645a965 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java +++ b/server/src/main/java/org/elasticsearch/common/logging/LogConfigurator.java @@ -30,6 +30,7 @@ import org.apache.logging.log4j.status.StatusData; import 
org.apache.logging.log4j.status.StatusListener; import org.apache.logging.log4j.status.StatusLogger; +import org.apache.logging.log4j.util.Unbox; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.logging.internal.LoggerFactoryImpl; import org.elasticsearch.common.settings.Settings; @@ -41,6 +42,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.PrintStream; +import java.lang.invoke.MethodHandles; import java.nio.charset.StandardCharsets; import java.nio.file.FileVisitOption; import java.nio.file.FileVisitResult; @@ -122,6 +124,7 @@ public static void configure(final Environment environment, boolean useConsole) } configureESLogging(); configure(environment.settings(), environment.configFile(), environment.logsFile(), useConsole); + initializeStatics(); } public static void configureESLogging() { @@ -144,6 +147,17 @@ public static void setNodeName(String nodeName) { NodeNamePatternConverter.setNodeName(nodeName); } + // Some classes within log4j have static initializers that require security manager permissions. + // Here we aggressively initialize those classes during logging configuration so that + // actual logging calls at runtime do not trigger that initialization. + private static void initializeStatics() { + try { + MethodHandles.publicLookup().ensureInitialized(Unbox.class); + } catch (IllegalAccessException impossible) { + throw new AssertionError(impossible); + } + } + private static void checkErrorListener() { assert errorListenerIsRegistered() : "expected error listener to be registered"; if (error.get()) { @@ -246,8 +260,22 @@ public FileVisitResult visitFile(final Path file, final BasicFileAttributes attr // Redirect stdout/stderr to log4j. While we ensure Elasticsearch code does not write to those streams, // third party libraries may do that. Note that we do NOT close the streams because other code may have // grabbed a handle to the streams and intend to write to it, eg log4j for writing to the console - System.setOut(new PrintStream(new LoggingOutputStream(LogManager.getLogger("stdout"), Level.INFO), false, StandardCharsets.UTF_8)); - System.setErr(new PrintStream(new LoggingOutputStream(LogManager.getLogger("stderr"), Level.WARN), false, StandardCharsets.UTF_8)); + System.setOut( + new PrintStream(new LoggingOutputStream(LogManager.getLogger("stdout"), Level.INFO, List.of()), false, StandardCharsets.UTF_8) + ); + System.setErr( + new PrintStream( + new LoggingOutputStream( + LogManager.getLogger("stderr"), + Level.WARN, + // MMapDirectory messages come from Lucene, suggesting to users as a warning that they should enable preview features in + // the JDK + List.of("MMapDirectory") + ), + false, + StandardCharsets.UTF_8 + ) + ); final Logger rootLogger = LogManager.getRootLogger(); Appender appender = Loggers.findAppender(rootLogger, ConsoleAppender.class); diff --git a/server/src/main/java/org/elasticsearch/common/logging/LoggingOutputStream.java b/server/src/main/java/org/elasticsearch/common/logging/LoggingOutputStream.java index 2f38986716df..6fd98259d925 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/LoggingOutputStream.java +++ b/server/src/main/java/org/elasticsearch/common/logging/LoggingOutputStream.java @@ -15,6 +15,7 @@ import java.io.OutputStream; import java.nio.charset.StandardCharsets; import java.util.Arrays; +import java.util.List; /** * A stream whose output is sent to the configured logger, line by line. 
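The hunk below gives LoggingOutputStream a list of substring filters, and the LogConfigurator change above passes List.of("MMapDirectory") so that Lucene's MMapDirectory advice on the redirected stderr stream is dropped rather than logged as a warning. A minimal standalone sketch of that filtering idea, assuming nothing beyond the JDK (FilteringLineStream and its behaviour are invented for illustration, not the Elasticsearch class):

import java.io.ByteArrayOutputStream;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.util.List;

final class FilteringLineStream extends OutputStream {

    private final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    private final List<String> messageFilters;

    FilteringLineStream(List<String> messageFilters) {
        this.messageFilters = messageFilters;
    }

    @Override
    public void write(int b) {
        if (b != '\n') {
            buffer.write(b);
            return;
        }
        String line = buffer.toString(StandardCharsets.UTF_8);
        buffer.reset();
        // drop the line if it contains any configured filter string, otherwise "log" it
        if (messageFilters.stream().noneMatch(line::contains)) {
            System.out.println("LOG: " + line);
        }
    }

    public static void main(String[] args) throws Exception {
        OutputStream out = new FilteringLineStream(List.of("MMapDirectory"));
        out.write("MMapDirectory: enable preview features for faster IO\n".getBytes(StandardCharsets.UTF_8));
        out.write("this message is logged\n".getBytes(StandardCharsets.UTF_8));
    }
}

A plain substring check is sufficient here because the deny-list is a small, fixed set of known messages; no regular expressions are needed on the logging hot path.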
@@ -42,9 +43,12 @@ static class Buffer { private final Level level; - LoggingOutputStream(Logger logger, Level level) { + private final List messageFilters; + + LoggingOutputStream(Logger logger, Level level, List messageFilters) { this.logger = logger; this.level = level; + this.messageFilters = messageFilters; } @Override @@ -103,8 +107,17 @@ public void close() { threadLocal = null; } + private void log(String msg) { + for (String filter : messageFilters) { + if (msg.contains(filter)) { + return; + } + } + this.log0(msg); + } + // pkg private for testing - void log(String msg) { + protected void log0(String msg) { logger.log(level, msg); } } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java index cf31d3ff1005..a660730bfb02 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -86,7 +86,7 @@ import java.util.Map; public class Lucene { - public static final String LATEST_CODEC = "Lucene94"; + public static final String LATEST_CODEC = "Lucene95"; public static final String SOFT_DELETES_FIELD = "__soft_deletes"; diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java b/server/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java index 5c411c7b397c..1293c59486e7 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQuery.java @@ -299,7 +299,29 @@ private static boolean termArraysEquals(List termArrays1, List t @Override public void visit(QueryVisitor visitor) { if (visitor.acceptField(field)) { - visitor.visitLeaf(this); // TODO implement term visiting + visitor = visitor.getSubVisitor(BooleanClause.Occur.MUST, this); + for (int i = 0; i < termArrays.size() - 1; i++) { + if (termArrays.get(i).length == 1) { + visitor.consumeTerms(this, termArrays.get(i)[0]); + } else { + QueryVisitor shouldVisitor = visitor.getSubVisitor(BooleanClause.Occur.SHOULD, this); + shouldVisitor.consumeTerms(this, termArrays.get(i)); + } + } + /* We don't report automata here because this breaks the unified highlighter, + which extracts automata separately from phrases. MPPQ gets rewritten to a + SpanMTQQuery by the PhraseHelper in any case, so highlighting is taken + care of there instead. If we extract automata here then the trailing prefix + word will be highlighted wherever it appears in the document, instead of only + as part of a phrase. This can be re-instated once we switch to using Matches + to highlight. 
+ for (Term prefixTerm : termArrays.get(termArrays.size() - 1)) { + visitor.consumeTermsMatching(this, field, () -> { + CompiledAutomaton ca = new CompiledAutomaton(PrefixQuery.toAutomaton(prefixTerm.bytes())); + return ca.runAutomaton; + }); + } + */ } } } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/store/ByteArrayIndexInput.java b/server/src/main/java/org/elasticsearch/common/lucene/store/ByteArrayIndexInput.java index 24ac1b32bc3b..e0dc4c941187 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/store/ByteArrayIndexInput.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/store/ByteArrayIndexInput.java @@ -9,6 +9,7 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.util.BitUtil; +import org.elasticsearch.common.Strings; import java.io.EOFException; import java.io.IOException; @@ -65,16 +66,14 @@ public IndexInput slice(String sliceDescription, long offset, long length) throw return new ByteArrayIndexInput(sliceDescription, bytes, this.offset + (int) offset, (int) length); } else { throw new IllegalArgumentException( - "slice() " - + sliceDescription - + " out of bounds: offset=" - + offset - + ",length=" - + length - + ",fileLength=" - + this.length - + ": " - + this + Strings.format( + "slice() %s out of bounds: offset=%d,length=%d,fileLength=%d: %s", + sliceDescription, + offset, + length, + this.length, + this + ) ); } } @@ -99,7 +98,7 @@ public void readBytes(final byte[] b, final int offset, int len) throws IOExcept @Override public short readShort() throws IOException { try { - return (short) BitUtil.VH_LE_SHORT.get(bytes, pos); + return (short) BitUtil.VH_LE_SHORT.get(bytes, pos + offset); } finally { pos += Short.BYTES; } @@ -108,7 +107,7 @@ public short readShort() throws IOException { @Override public int readInt() throws IOException { try { - return (int) BitUtil.VH_LE_INT.get(bytes, pos); + return (int) BitUtil.VH_LE_INT.get(bytes, pos + offset); } finally { pos += Integer.BYTES; } @@ -117,7 +116,7 @@ public int readInt() throws IOException { @Override public long readLong() throws IOException { try { - return (long) BitUtil.VH_LE_LONG.get(bytes, pos); + return (long) BitUtil.VH_LE_LONG.get(bytes, pos + offset); } finally { pos += Long.BYTES; } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/store/BytesReferenceIndexInput.java b/server/src/main/java/org/elasticsearch/common/lucene/store/BytesReferenceIndexInput.java new file mode 100644 index 000000000000..b40dc36e1111 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/lucene/store/BytesReferenceIndexInput.java @@ -0,0 +1,139 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.common.lucene.store; + +import org.apache.lucene.store.IndexInput; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.EOFException; +import java.io.IOException; + +public class BytesReferenceIndexInput extends IndexInput { + + private final BytesReference bytesReference; + + private int filePointer; + private StreamInput streamInput; + + public BytesReferenceIndexInput(String resourceDescription, BytesReference bytesReference) { + this(resourceDescription, bytesReference, 0); + } + + private BytesReferenceIndexInput(String resourceDescription, BytesReference bytesReference, int filePointer) { + super(resourceDescription); + this.bytesReference = bytesReference; + this.filePointer = filePointer; + } + + @Override + public void close() throws IOException {} + + @Override + public long getFilePointer() { + return filePointer; + } + + private StreamInput getOrOpenStreamInput() throws IOException { + if (streamInput == null) { + streamInput = bytesReference.slice(filePointer, bytesReference.length() - filePointer).streamInput(); + } + return streamInput; + } + + @Override + public void seek(long longPos) throws IOException { + if (longPos < 0) { + throw new IllegalArgumentException("Seeking to negative position: " + longPos); + } else if (longPos > bytesReference.length()) { + throw new EOFException("seek past EOF"); + } + var pos = (int) longPos; + if (pos < filePointer) { + streamInput = null; + } else if (streamInput != null) { + final var toSkip = pos - filePointer; + final var skipped = streamInput.skip(toSkip); + assert skipped == toSkip; + } + filePointer = pos; + } + + @Override + public long length() { + return bytesReference.length(); + } + + @Override + public IndexInput slice(String sliceDescription, long offset, long length) throws IOException { + if (offset >= 0L && length >= 0L && offset + length <= bytesReference.length()) { + return new BytesReferenceIndexInput(sliceDescription, bytesReference.slice((int) offset, (int) length)); + } else { + throw new IllegalArgumentException( + Strings.format( + "slice() %s out of bounds: offset=%d,length=%d,fileLength=%d: %s", + sliceDescription, + offset, + length, + bytesReference.length(), + this + ) + ); + } + } + + @Override + public byte readByte() throws IOException { + try { + return getOrOpenStreamInput().readByte(); + } finally { + filePointer += 1; + } + } + + @Override + public void readBytes(byte[] b, int offset, int len) throws IOException { + getOrOpenStreamInput().readBytes(b, offset, len); + filePointer += len; + } + + @Override + public short readShort() throws IOException { + try { + return Short.reverseBytes(getOrOpenStreamInput().readShort()); + } finally { + filePointer += Short.BYTES; + } + } + + @Override + public int readInt() throws IOException { + try { + return Integer.reverseBytes(getOrOpenStreamInput().readInt()); + } finally { + filePointer += Integer.BYTES; + } + } + + @Override + public long readLong() throws IOException { + try { + return Long.reverseBytes(getOrOpenStreamInput().readLong()); + } finally { + filePointer += Long.BYTES; + } + } + + @SuppressWarnings("MethodDoesntCallSuperMethod") + @Override + public IndexInput clone() { + return new BytesReferenceIndexInput(toString(), bytesReference, filePointer); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/lucene/store/InputStreamIndexInput.java 
b/server/src/main/java/org/elasticsearch/common/lucene/store/InputStreamIndexInput.java index 04fe48c01607..c7abaa781792 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/store/InputStreamIndexInput.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/store/InputStreamIndexInput.java @@ -43,6 +43,9 @@ public int read(byte[] b, int off, int len) throws IOException { } else if (off < 0 || len < 0 || len > b.length - off) { throw new IndexOutOfBoundsException(); } + if (len == 0) { + return 0; + } if (indexInput.getFilePointer() >= indexInput.length()) { return -1; } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java b/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java index 50c6da7418d8..a60c3f10b215 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java @@ -8,15 +8,18 @@ package org.elasticsearch.common.lucene.uid; +import org.apache.lucene.document.LongPoint; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.PointValues; import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndSeqNo; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion; @@ -52,10 +55,15 @@ final class PerThreadIDVersionAndSeqNoLookup { /** used for assertions to make sure class usage meets assumptions */ private final Object readerKey; + final boolean loadedTimestampRange; + final long minTimestamp; + final long maxTimestamp; + /** * Initialize lookup for the provided segment */ - PerThreadIDVersionAndSeqNoLookup(LeafReader reader, String uidField, boolean trackReaderKey) throws IOException { + PerThreadIDVersionAndSeqNoLookup(LeafReader reader, String uidField, boolean trackReaderKey, boolean loadTimestampRange) + throws IOException { this.uidField = uidField; final Terms terms = reader.terms(uidField); if (terms == null) { @@ -84,10 +92,20 @@ final class PerThreadIDVersionAndSeqNoLookup { Object readerKey = null; assert trackReaderKey ? (readerKey = reader.getCoreCacheHelper().getKey()) != null : readerKey == null; this.readerKey = readerKey; + + this.loadedTimestampRange = loadTimestampRange; + if (loadTimestampRange) { + PointValues tsPointValues = reader.getPointValues(DataStream.TimestampField.FIXED_TIMESTAMP_FIELD); + minTimestamp = LongPoint.decodeDimension(tsPointValues.getMinPackedValue(), 0); + maxTimestamp = LongPoint.decodeDimension(tsPointValues.getMaxPackedValue(), 0); + } else { + minTimestamp = 0; + maxTimestamp = Long.MAX_VALUE; + } } - PerThreadIDVersionAndSeqNoLookup(LeafReader reader, String uidField) throws IOException { - this(reader, uidField, true); + PerThreadIDVersionAndSeqNoLookup(LeafReader reader, String uidField, boolean loadTimestampRange) throws IOException { + this(reader, uidField, true, loadTimestampRange); } /** Return null if id is not found. 
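Bridging the two file diffs: the lookup class above now records each segment's @timestamp range from the point values, and the resolver below consults those ranges so a time-series ID lookup can skip whole segments. A rough sketch of just that skip logic, assuming (as the following diff does) that segments are visited newest-first by maximum timestamp; SegmentRange and TimestampSkipSketch are invented names, not Elasticsearch types:

import java.util.List;

public class TimestampSkipSketch {

    // the [min, max] @timestamp range of one segment, as loaded by the lookup above
    record SegmentRange(long minTimestamp, long maxTimestamp) {}

    public static void main(String[] args) {
        // newest data first, mirroring the time-series leaf reader sort order
        List<SegmentRange> segments = List.of(new SegmentRange(3_000, 4_000), new SegmentRange(1_000, 2_000));
        long timestamp = 1_500; // the timestamp decoded from the last 8 bytes of the document ID

        for (SegmentRange segment : segments) {
            if (timestamp > segment.maxTimestamp()) {
                // newer than this segment, and every remaining segment is older still: stop, not found
                System.out.println("stop: " + segment + " and everything after it is too old");
                break;
            }
            if (timestamp < segment.minTimestamp()) {
                // this segment only holds newer data; an older segment may still contain the document
                System.out.println("skip: " + segment);
                continue;
            }
            System.out.println("candidate: run the per-segment ID lookup against " + segment);
        }
    }
}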
diff --git a/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java b/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java index 7098e17bfeaa..b4433a36d498 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java @@ -13,9 +13,12 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; import org.apache.lucene.util.CloseableThreadLocal; +import org.elasticsearch.Assertions; +import org.elasticsearch.common.util.ByteUtils; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import java.io.IOException; +import java.util.Base64; import java.util.List; import java.util.Objects; import java.util.concurrent.ConcurrentMap; @@ -34,7 +37,8 @@ public final class VersionsAndSeqNoResolver { } }; - private static PerThreadIDVersionAndSeqNoLookup[] getLookupState(IndexReader reader, String uidField) throws IOException { + private static PerThreadIDVersionAndSeqNoLookup[] getLookupState(IndexReader reader, String uidField, boolean loadTimestampRange) + throws IOException { // We cache on the top level // This means cache entries have a shorter lifetime, maybe as low as 1s with the // default refresh interval and a steady indexing rate, but on the other hand it @@ -59,9 +63,24 @@ private static PerThreadIDVersionAndSeqNoLookup[] getLookupState(IndexReader rea if (lookupState == null) { lookupState = new PerThreadIDVersionAndSeqNoLookup[reader.leaves().size()]; for (LeafReaderContext leaf : reader.leaves()) { - lookupState[leaf.ord] = new PerThreadIDVersionAndSeqNoLookup(leaf.reader(), uidField); + lookupState[leaf.ord] = new PerThreadIDVersionAndSeqNoLookup(leaf.reader(), uidField, loadTimestampRange); } ctl.set(lookupState); + } else { + if (Assertions.ENABLED) { + // Ensure cached lookup instances have loaded timestamp range if that was requested + for (PerThreadIDVersionAndSeqNoLookup lookup : lookupState) { + if (lookup.loadedTimestampRange != loadTimestampRange) { + throw new AssertionError( + "Mismatch between lookup.loadedTimestampRange [" + + lookup.loadedTimestampRange + + "] and loadTimestampRange [" + + loadTimestampRange + + "]" + ); + } + } + } } if (lookupState.length != reader.leaves().size()) { @@ -117,8 +136,8 @@ public static class DocIdAndSeqNo { *
  • a doc ID and a version otherwise * */ - public static DocIdAndVersion loadDocIdAndVersion(IndexReader reader, Term term, boolean loadSeqNo) throws IOException { - PerThreadIDVersionAndSeqNoLookup[] lookups = getLookupState(reader, term.field()); + public static DocIdAndVersion timeSeriesLoadDocIdAndVersion(IndexReader reader, Term term, boolean loadSeqNo) throws IOException { + PerThreadIDVersionAndSeqNoLookup[] lookups = getLookupState(reader, term.field(), false); List leaves = reader.leaves(); // iterate backwards to optimize for the frequently updated documents // which are likely to be in the last segments @@ -133,11 +152,58 @@ public static DocIdAndVersion loadDocIdAndVersion(IndexReader reader, Term term, return null; } + /** + * A special variant of loading docid and version in case of time series indices. + *

    + * Makes use of the fact that timestamp is part of the id, the existence of @timestamp field and + * that segments are sorted by {@link org.elasticsearch.cluster.metadata.DataStream#TIMESERIES_LEAF_READERS_SORTER}. + * This allows this method to know whether there is no document with the specified id without loading the docid for + * the specified id. + * + * @param reader The reader load docid, version and seqno from. + * @param uid The term that describes the uid of the document to load docid, version and seqno for. + * @param id The id that contains the encoded timestamp. The timestamp is used to skip checking the id for entire segments. + * @param loadSeqNo Whether to load sequence number from _seq_no doc values field. + * @return the internal doc ID and version for the specified term from the specified reader or + * returning null if no document was found for the specified id + * @throws IOException In case of an i/o related failure + */ + public static DocIdAndVersion timeSeriesLoadDocIdAndVersion(IndexReader reader, Term uid, String id, boolean loadSeqNo) + throws IOException { + byte[] idAsBytes = Base64.getUrlDecoder().decode(id); + assert idAsBytes.length == 20; + // id format: [4 bytes (basic hash routing fields), 8 bytes prefix of 128 murmurhash dimension fields, 8 bytes + // @timestamp) + long timestamp = ByteUtils.readLongBE(idAsBytes, 12); + + PerThreadIDVersionAndSeqNoLookup[] lookups = getLookupState(reader, uid.field(), true); + List leaves = reader.leaves(); + // iterate in default order, the segments should be sorted by DataStream#TIMESERIES_LEAF_READERS_SORTER + long prevMaxTimestamp = Long.MAX_VALUE; + for (final LeafReaderContext leaf : leaves) { + PerThreadIDVersionAndSeqNoLookup lookup = lookups[leaf.ord]; + assert lookup.loadedTimestampRange; + assert prevMaxTimestamp >= lookup.maxTimestamp; + if (timestamp < lookup.minTimestamp) { + continue; + } + if (timestamp > lookup.maxTimestamp) { + return null; + } + DocIdAndVersion result = lookup.lookupVersion(uid.bytes(), loadSeqNo, leaf); + if (result != null) { + return result; + } + prevMaxTimestamp = lookup.maxTimestamp; + } + return null; + } + public static DocIdAndVersion loadDocIdAndVersionUncached(IndexReader reader, Term term, boolean loadSeqNo) throws IOException { List leaves = reader.leaves(); for (int i = leaves.size() - 1; i >= 0; i--) { final LeafReaderContext leaf = leaves.get(i); - PerThreadIDVersionAndSeqNoLookup lookup = new PerThreadIDVersionAndSeqNoLookup(leaf.reader(), term.field(), false); + PerThreadIDVersionAndSeqNoLookup lookup = new PerThreadIDVersionAndSeqNoLookup(leaf.reader(), term.field(), false, false); DocIdAndVersion result = lookup.lookupVersion(term.bytes(), loadSeqNo, leaf); if (result != null) { return result; @@ -151,7 +217,7 @@ public static DocIdAndVersion loadDocIdAndVersionUncached(IndexReader reader, Te * The result is either null or the live and latest version of the given uid. 
*/ public static DocIdAndSeqNo loadDocIdAndSeqNo(IndexReader reader, Term term) throws IOException { - final PerThreadIDVersionAndSeqNoLookup[] lookups = getLookupState(reader, term.field()); + final PerThreadIDVersionAndSeqNoLookup[] lookups = getLookupState(reader, term.field(), false); final List leaves = reader.leaves(); // iterate backwards to optimize for the frequently updated documents // which are likely to be in the last segments diff --git a/server/src/main/java/org/elasticsearch/common/metrics/Counters.java b/server/src/main/java/org/elasticsearch/common/metrics/Counters.java index 2496f741dde2..4d34e6489f20 100644 --- a/server/src/main/java/org/elasticsearch/common/metrics/Counters.java +++ b/server/src/main/java/org/elasticsearch/common/metrics/Counters.java @@ -87,11 +87,11 @@ public boolean hasCounters() { * }, * "foobar": 5 * } - * @return A nested map with all the current configured counters + * @return A mutable nested map with all the current configured counters. The map is mutable to allow the client to further enrich it. * @throws IllegalStateException if there is a conflict in a path of two counters for example `foo`: 1 and `foo.bar`: 1. */ @SuppressWarnings("unchecked") - public Map toNestedMap() { + public Map toMutableNestedMap() { Map root = new HashMap<>(); for (var counter : counters.entrySet()) { Map currentLevel = root; diff --git a/server/src/main/java/org/elasticsearch/common/regex/Regex.java b/server/src/main/java/org/elasticsearch/common/regex/Regex.java index ff2ac47ad209..73975e6c91e6 100644 --- a/server/src/main/java/org/elasticsearch/common/regex/Regex.java +++ b/server/src/main/java/org/elasticsearch/common/regex/Regex.java @@ -11,13 +11,17 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.Automata; import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.CharacterRunAutomaton; import org.apache.lucene.util.automaton.Operations; import org.elasticsearch.common.Strings; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Locale; +import java.util.Set; +import java.util.function.Predicate; import java.util.regex.Pattern; public class Regex { @@ -90,6 +94,40 @@ public static Automaton simpleMatchToAutomaton(String... patterns) { return Operations.union(automata); } + /** + * Create a {@link Predicate} that matches the given patterns. Evaluating + * the returned predicate against a {@link String} yields the same result as + * running {@link #simpleMatch(String[], String)} but may run faster, + * especially in the case when there are multiple patterns. + */ + public static Predicate simpleMatcher(String... 
patterns) { + if (patterns == null || patterns.length == 0) { + return str -> false; + } + boolean hasWildcard = false; + for (String pattern : patterns) { + if (isMatchAllPattern(pattern)) { + return str -> true; + } + if (isSimpleMatchPattern(pattern)) { + hasWildcard = true; + break; + } + } + if (patterns.length == 1) { + if (hasWildcard) { + return str -> simpleMatch(patterns[0], str); + } else { + return patterns[0]::equals; + } + } else if (hasWildcard == false) { + return Set.copyOf(Arrays.asList(patterns))::contains; + } else { + Automaton automaton = simpleMatchToAutomaton(patterns); + return new CharacterRunAutomaton(automaton)::run; + } + } + /** * Match a String against the given pattern, supporting the following simple * pattern styles: "xxx*", "*xxx", "*xxx*" and "xxx*yyy" matches (with an diff --git a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index 579d4a7c9eea..f35228354404 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -434,6 +434,20 @@ public synchronized void addSettingsUpdateConsumer(Setting setting, Consu addSettingsUpdateConsumer(setting, consumer, (s) -> {}); } + /** + * This methods passes the setting value to a consumer during the initialization and on every setting change + *
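A minimal, hypothetical usage sketch of the Regex.simpleMatcher helper added above; the patterns are illustrative, and the resulting predicate is expected to agree with Regex.simpleMatch for the same inputs while avoiding repeated pattern parsing.

    Predicate<String> matcher = Regex.simpleMatcher("logs-*", "metrics-*", "events");
    assert matcher.test("logs-2023.01.01");        // wildcard pattern "logs-*"
    assert matcher.test("events");                 // exact, non-wildcard pattern
    assert matcher.test("traces-1") == false;      // matches none of the patterns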

    + * Note: Only settings registered in {@link org.elasticsearch.cluster.ClusterModule} can be changed dynamically. + *
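A minimal, hypothetical sketch of calling the initializeAndWatch method defined just below; the setting, its name, and the field it feeds are placeholders, chosen to satisfy the Dynamic and NodeScope assertions.

    private static final Setting<TimeValue> POLL_INTERVAL = Setting.timeSetting(
        "example.poll_interval",
        TimeValue.timeValueSeconds(30),
        Setting.Property.NodeScope,
        Setting.Property.Dynamic
    );

    private volatile TimeValue pollInterval;

    void register(ClusterSettings clusterSettings) {
        // the consumer sees the current value immediately, then every later update
        clusterSettings.initializeAndWatch(POLL_INTERVAL, interval -> this.pollInterval = interval);
    }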

    + */ + public synchronized void initializeAndWatch(Setting setting, Consumer consumer) { + assert setting.getProperties().contains(Setting.Property.Dynamic) + || setting.getProperties().contains(Setting.Property.OperatorDynamic) : "Can only watch dynamic settings"; + assert setting.getProperties().contains(Setting.Property.NodeScope) : "Can only watch node settings"; + consumer.accept(setting.get(settings)); + addSettingsUpdateConsumer(setting, consumer); + } + protected void validateDeprecatedAndRemovedSettingV7(Settings settings, Setting setting) {} /** diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index f45758df42d8..c7a294b56c95 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction; import org.elasticsearch.action.bulk.WriteAckDelay; +import org.elasticsearch.action.ingest.SimulatePipelineTransportAction; import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.DestructiveOperations; @@ -40,6 +41,7 @@ import org.elasticsearch.cluster.routing.allocation.DataTier; import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceComputer; import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ConcurrentRebalanceAllocationDecider; @@ -110,6 +112,7 @@ import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ProxyConnectionStrategy; +import org.elasticsearch.transport.RemoteClusterPortSettings; import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.RemoteConnectionStrategy; import org.elasticsearch.transport.SniffConnectionStrategy; @@ -130,6 +133,14 @@ */ public final class ClusterSettings extends AbstractScopedSettings { + public static ClusterSettings createBuiltInClusterSettings() { + return createBuiltInClusterSettings(Settings.EMPTY); + } + + public static ClusterSettings createBuiltInClusterSettings(Settings nodeSettings) { + return new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + } + public ClusterSettings(final Settings nodeSettings, final Set> settingsSet) { this(nodeSettings, settingsSet, Collections.emptySet()); } @@ -198,6 +209,7 @@ public void apply(Settings value, Settings current, Settings previous) { BalancedShardsAllocator.WRITE_LOAD_BALANCE_FACTOR_SETTING, BalancedShardsAllocator.DISK_USAGE_BALANCE_FACTOR_SETTING, BalancedShardsAllocator.THRESHOLD_SETTING, + DesiredBalanceComputer.PROGRESS_LOG_INTERVAL_SETTING, BreakerSettings.CIRCUIT_BREAKER_LIMIT_SETTING, BreakerSettings.CIRCUIT_BREAKER_OVERHEAD_SETTING, BreakerSettings.CIRCUIT_BREAKER_TYPE, @@ -316,6 +328,7 @@ public void apply(Settings value, Settings current, Settings previous) { 
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, IndexModule.NODE_STORE_ALLOW_MMAP, + IndexSettings.NODE_DEFAULT_REFRESH_INTERVAL_SETTING, ClusterApplierService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, ClusterService.USER_DEFINED_METADATA, MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, @@ -535,9 +548,24 @@ public void apply(Settings value, Settings current, Settings previous) { HealthNodeTaskExecutor.ENABLED_SETTING, LocalHealthMonitor.POLL_INTERVAL_SETTING, TransportHealthNodeAction.HEALTH_NODE_TRANSPORT_ACTION_TIMEOUT, + SimulatePipelineTransportAction.INGEST_NODE_TRANSPORT_ACTION_TIMEOUT, WriteAckDelay.WRITE_ACK_DELAY_INTERVAL, WriteAckDelay.WRITE_ACK_DELAY_RANDOMNESS_BOUND, - TcpTransport.isUntrustedRemoteClusterEnabled() ? RemoteClusterService.REMOTE_CLUSTER_AUTHORIZATION : null + TcpTransport.isUntrustedRemoteClusterEnabled() ? RemoteClusterService.REMOTE_CLUSTER_AUTHORIZATION : null, + TcpTransport.isUntrustedRemoteClusterEnabled() ? RemoteClusterPortSettings.REMOTE_CLUSTER_PORT_ENABLED : null, + TcpTransport.isUntrustedRemoteClusterEnabled() ? RemoteClusterPortSettings.HOST : null, + TcpTransport.isUntrustedRemoteClusterEnabled() ? RemoteClusterPortSettings.PUBLISH_HOST : null, + TcpTransport.isUntrustedRemoteClusterEnabled() ? RemoteClusterPortSettings.BIND_HOST : null, + TcpTransport.isUntrustedRemoteClusterEnabled() ? RemoteClusterPortSettings.PORT : null, + TcpTransport.isUntrustedRemoteClusterEnabled() ? RemoteClusterPortSettings.PUBLISH_PORT : null, + TcpTransport.isUntrustedRemoteClusterEnabled() ? RemoteClusterPortSettings.TCP_KEEP_ALIVE : null, + TcpTransport.isUntrustedRemoteClusterEnabled() ? RemoteClusterPortSettings.TCP_KEEP_IDLE : null, + TcpTransport.isUntrustedRemoteClusterEnabled() ? RemoteClusterPortSettings.TCP_KEEP_INTERVAL : null, + TcpTransport.isUntrustedRemoteClusterEnabled() ? RemoteClusterPortSettings.TCP_KEEP_COUNT : null, + TcpTransport.isUntrustedRemoteClusterEnabled() ? RemoteClusterPortSettings.TCP_NO_DELAY : null, + TcpTransport.isUntrustedRemoteClusterEnabled() ? RemoteClusterPortSettings.TCP_REUSE_ADDRESS : null, + TcpTransport.isUntrustedRemoteClusterEnabled() ? RemoteClusterPortSettings.TCP_SEND_BUFFER_SIZE : null, + StatelessSecureSettings.STATELESS_SECURE_SETTINGS ).filter(Objects::nonNull).collect(Collectors.toSet()); static List> BUILT_IN_SETTING_UPGRADERS = Collections.emptyList(); diff --git a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java index c4a0477bcec6..8e5b06056bcc 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java +++ b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java @@ -25,6 +25,9 @@ import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.hash.MessageDigests; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; @@ -80,7 +83,7 @@ private enum EntryType { } /** An entry in the keystore. The bytes are opaque and interpreted based on the entry type. 
*/ - private static class Entry { + private static class Entry implements Writeable { final byte[] bytes; final byte[] sha256Digest; @@ -88,6 +91,17 @@ private static class Entry { this.bytes = bytes; this.sha256Digest = MessageDigests.sha256().digest(bytes); } + + Entry(StreamInput input) throws IOException { + this.bytes = input.readByteArray(); + this.sha256Digest = input.readByteArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeByteArray(bytes); + out.writeByteArray(sha256Digest); + } } /** @@ -167,6 +181,14 @@ private KeyStoreWrapper(int formatVersion, boolean hasPassword, byte[] dataBytes this.dataBytes = dataBytes; } + public KeyStoreWrapper(StreamInput input) throws IOException { + formatVersion = input.readInt(); + hasPassword = input.readBoolean(); + dataBytes = input.readOptionalByteArray(); + entries.set(input.readMap(StreamInput::readString, Entry::new)); + closed = input.readBoolean(); + } + /** * Get the metadata format version for the keystore **/ @@ -614,4 +636,14 @@ public synchronized void close() { } } } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeInt(formatVersion); + out.writeBoolean(hasPassword); + out.writeOptionalByteArray(dataBytes); + var entriesMap = entries.get(); + out.writeMap((entriesMap == null) ? Map.of() : entriesMap, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + out.writeBoolean(closed); + } } diff --git a/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java b/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java new file mode 100644 index 000000000000..8157db24463c --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java @@ -0,0 +1,186 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.settings; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.common.hash.MessageDigests; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.env.Environment; +import org.elasticsearch.reservedstate.service.ReservedStateVersion; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParserConfiguration; + +import java.io.BufferedInputStream; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.GeneralSecurityException; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.xcontent.XContentType.JSON; + +/** + * An implementation of {@link SecureSettings} which loads the secrets from + * externally mounted local directory. It looks for the folder called 'secrets' + * under the config directory. All secure settings should be supplied in a single + * file called 'secrets.json' which sits inside the 'secrets' directory. + *

    + * If the 'secrets' directory or the 'secrets.json' file doesn't exist, the + * SecureSettings implementation is loaded with an empty settings map. + *

    + * Example secrets.json format: + * { + * "metadata": { + * "version": "1", + * "compatibility": "8.7.0" + * }, + * "secrets": { + * "secure.setting.key.one": "aaa", + * "secure.setting.key.two": "bbb" + * } + * } + */ +public class LocallyMountedSecrets implements SecureSettings { + + public static final String SECRETS_FILE_NAME = "secrets.json"; + public static final String SECRETS_DIRECTORY = "secrets"; + + public static final ParseField SECRETS_FIELD = new ParseField("secrets"); + public static final ParseField METADATA_FIELD = new ParseField("metadata"); + + @SuppressWarnings("unchecked") + private final ConstructingObjectParser secretsParser = new ConstructingObjectParser<>( + "locally_mounted_secrets", + a -> new LocalFileSecrets((Map) a[0], (ReservedStateVersion) a[1]) + ); + + private final String secretsDir; + private final String secretsFile; + private final SetOnce secrets = new SetOnce<>(); + + /** + * Direct constructor to be used by the CLI + */ + public LocallyMountedSecrets(Environment environment) { + var secretsDirPath = environment.configFile().toAbsolutePath().resolve(SECRETS_DIRECTORY); + var secretsFilePath = secretsDirPath.resolve(SECRETS_FILE_NAME); + secretsParser.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> p.map(), SECRETS_FIELD); + secretsParser.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> ReservedStateVersion.parse(p), METADATA_FIELD); + if (Files.exists(secretsDirPath) && Files.exists(secretsFilePath)) { + try { + secrets.set(processSecretsFile(secretsFilePath)); + } catch (IOException e) { + throw new IllegalStateException("Error processing secrets file", e); + } + } else { + secrets.set(new LocalFileSecrets(Map.of(), new ReservedStateVersion(-1L, Version.CURRENT))); + } + this.secretsDir = secretsDirPath.toString(); + this.secretsFile = secretsFilePath.toString(); + } + + /** + * Used by {@link org.elasticsearch.bootstrap.ServerArgs} to deserialize the secrets + * when they are received by the Elasticsearch process. The ServerCli code serializes + * the secrets as part of ServerArgs. + */ + public LocallyMountedSecrets(StreamInput in) throws IOException { + this.secretsDir = in.readString(); + this.secretsFile = in.readString(); + if (in.readBoolean()) { + secrets.set(LocalFileSecrets.readFrom(in)); + } + // TODO: Add support for watching for file changes here. 
+ } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(secretsDir); + out.writeString(secretsFile); + if (secrets.get() == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + secrets.get().writeTo(out); + } + } + + @Override + public boolean isLoaded() { + return secrets.get() != null; + } + + @Override + public Set getSettingNames() { + assert isLoaded(); + return secrets.get().map().keySet(); + } + + @Override + public SecureString getString(String setting) { + assert isLoaded(); + var value = secrets.get().map().get(setting); + if (value == null) { + return null; + } + return new SecureString(value.toCharArray()); + } + + @Override + public InputStream getFile(String setting) throws GeneralSecurityException { + assert isLoaded(); + return new ByteArrayInputStream(getString(setting).toString().getBytes(StandardCharsets.UTF_8)); + } + + @Override + public byte[] getSHA256Digest(String setting) throws GeneralSecurityException { + assert isLoaded(); + return MessageDigests.sha256().digest(getString(setting).toString().getBytes(StandardCharsets.UTF_8)); + } + + @Override + public void close() throws IOException { + if (null != secrets.get() && secrets.get().map().isEmpty() == false) { + for (var entry : secrets.get().map().entrySet()) { + entry.setValue(null); + } + } + } + + // package private for testing + LocalFileSecrets processSecretsFile(Path path) throws IOException { + try ( + var fis = Files.newInputStream(path); + var bis = new BufferedInputStream(fis); + var parser = JSON.xContent().createParser(XContentParserConfiguration.EMPTY, bis) + ) { + return secretsParser.apply(parser, null); + } + } + + record LocalFileSecrets(Map map, ReservedStateVersion metadata) implements Writeable { + public static LocalFileSecrets readFrom(StreamInput in) throws IOException { + return new LocalFileSecrets(in.readMap(StreamInput::readString, StreamInput::readString), ReservedStateVersion.readFrom(in)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap((map == null) ? Map.of() : map, StreamOutput::writeString, StreamOutput::writeString); + metadata.writeTo(out); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/common/settings/SecureSettings.java b/server/src/main/java/org/elasticsearch/common/settings/SecureSettings.java index 25edbe6e846b..3b7d5c901877 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/SecureSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/SecureSettings.java @@ -8,6 +8,8 @@ package org.elasticsearch.common.settings; +import org.elasticsearch.common.io.stream.Writeable; + import java.io.Closeable; import java.io.IOException; import java.io.InputStream; @@ -17,7 +19,7 @@ /** * An accessor for settings which are securely stored. See {@link SecureSetting}. */ -public interface SecureSettings extends Closeable { +public interface SecureSettings extends Closeable, Writeable { /** Returns true iff the settings are loaded and retrievable. */ boolean isLoaded(); @@ -26,7 +28,7 @@ public interface SecureSettings extends Closeable { Set getSettingNames(); /** Return a string setting. The {@link SecureString} should be closed once it is used. */ - SecureString getString(String setting) throws GeneralSecurityException; + SecureString getString(String setting); /** Return a file setting. The {@link InputStream} should be closed once it is used. 
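A minimal, hypothetical sketch of consuming the LocallyMountedSecrets implementation above, assuming 'environment' points at a config directory that contains secrets/secrets.json in the format shown in its javadoc.

    LocallyMountedSecrets secrets = new LocallyMountedSecrets(environment);
    if (secrets.isLoaded()) {
        for (String name : secrets.getSettingNames()) {
            try (SecureString value = secrets.getString(name)) {
                // e.g. "secure.setting.key.one" -> "aaa" for the example file above
            }
        }
    }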
*/ InputStream getFile(String setting) throws GeneralSecurityException; diff --git a/server/src/main/java/org/elasticsearch/common/settings/Settings.java b/server/src/main/java/org/elasticsearch/common/settings/Settings.java index 1ebb189a358e..bbd43da1f83b 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -1503,7 +1503,7 @@ public Set getSettingNames() { } @Override - public SecureString getString(String setting) throws GeneralSecurityException { + public SecureString getString(String setting) { return delegate.getString(addPrefix.apply(setting)); } @@ -1521,6 +1521,11 @@ public byte[] getSHA256Digest(String setting) throws GeneralSecurityException { public void close() throws IOException { delegate.close(); } + + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new IllegalStateException("Unsupported operation"); + } } @Override diff --git a/server/src/main/java/org/elasticsearch/common/settings/StatelessSecureSettings.java b/server/src/main/java/org/elasticsearch/common/settings/StatelessSecureSettings.java new file mode 100644 index 000000000000..98c1f188de2f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/settings/StatelessSecureSettings.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.settings; + +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.hash.MessageDigests; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.security.GeneralSecurityException; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * An implementation of secure settings from YML settings. + * + * WARNING: this is a temporary class only for Stateless. It applies only to YML settings with a predetermined prefix. 
+ */ +public class StatelessSecureSettings implements SecureSettings { + static final String PREFIX = "insecure."; + static final Setting.AffixSetting STATELESS_SECURE_SETTINGS = Setting.prefixKeySetting( + PREFIX, + (key) -> Setting.simpleString(key, Setting.Property.NodeScope) + ); + + private final Settings settings; + private final Set names; + + private StatelessSecureSettings(Settings settings) { + if (DiscoveryNode.isStateless(settings) == false) { + throw new IllegalArgumentException("StatelessSecureSettings are supported only in stateless"); + } + this.settings = Settings.builder().put(settings, false).build(); + this.names = settings.keySet() + .stream() + .filter(key -> (key.startsWith(PREFIX))) + .map(s -> s.replace(PREFIX, "")) + .collect(Collectors.toUnmodifiableSet()); + } + + public static Settings install(Settings settings) { + StatelessSecureSettings statelessSecureSettings = new StatelessSecureSettings(settings); + return Settings.builder().put(settings, false).setSecureSettings(statelessSecureSettings).build(); + } + + @Override + public boolean isLoaded() { + return true; + } + + @Override + public Set getSettingNames() { + return names; + } + + @Override + public SecureString getString(String setting) { + return new SecureString(STATELESS_SECURE_SETTINGS.getConcreteSetting(PREFIX + setting).get(settings).toCharArray()); + } + + @Override + public InputStream getFile(String setting) throws GeneralSecurityException { + return new ByteArrayInputStream( + STATELESS_SECURE_SETTINGS.getConcreteSetting(PREFIX + setting).get(settings).getBytes(StandardCharsets.UTF_8) + ); + } + + @Override + public byte[] getSHA256Digest(String setting) { + return MessageDigests.sha256() + .digest(STATELESS_SECURE_SETTINGS.getConcreteSetting(PREFIX + setting).get(settings).getBytes(StandardCharsets.UTF_8)); + } + + @Override + public void close() throws IOException {} + + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new IllegalStateException("Unsupported operation"); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/unit/Processors.java b/server/src/main/java/org/elasticsearch/common/unit/Processors.java index b95bdd3615f3..89db778266ea 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/Processors.java +++ b/server/src/main/java/org/elasticsearch/common/unit/Processors.java @@ -8,7 +8,7 @@ package org.elasticsearch.common.unit; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -29,8 +29,8 @@ public class Processors implements Writeable, Comparable, ToXContent public static final Processors ZERO = new Processors(0.0); public static final Processors MAX_PROCESSORS = new Processors(Double.MAX_VALUE); - public static final Version FLOAT_PROCESSORS_SUPPORT_VERSION = Version.V_8_3_0; - public static final Version DOUBLE_PROCESSORS_SUPPORT_VERSION = Version.V_8_5_0; + public static final TransportVersion FLOAT_PROCESSORS_SUPPORT_VERSION = TransportVersion.V_8_3_0; + public static final TransportVersion DOUBLE_PROCESSORS_SUPPORT_VERSION = TransportVersion.V_8_5_0; static final int NUMBER_OF_DECIMAL_PLACES = 5; private static final double MIN_REPRESENTABLE_PROCESSORS = 1E-5; @@ -63,9 +63,9 @@ public static Processors of(Double count) { public static Processors readFrom(StreamInput in) throws IOException { final double processorCount; - if 
(in.getVersion().before(FLOAT_PROCESSORS_SUPPORT_VERSION)) { + if (in.getTransportVersion().before(FLOAT_PROCESSORS_SUPPORT_VERSION)) { processorCount = in.readInt(); - } else if (in.getVersion().before(DOUBLE_PROCESSORS_SUPPORT_VERSION)) { + } else if (in.getTransportVersion().before(DOUBLE_PROCESSORS_SUPPORT_VERSION)) { processorCount = in.readFloat(); } else { processorCount = in.readDouble(); @@ -75,10 +75,10 @@ public static Processors readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(FLOAT_PROCESSORS_SUPPORT_VERSION)) { + if (out.getTransportVersion().before(FLOAT_PROCESSORS_SUPPORT_VERSION)) { assert hasDecimals() == false; out.writeInt((int) count); - } else if (out.getVersion().before(DOUBLE_PROCESSORS_SUPPORT_VERSION)) { + } else if (out.getTransportVersion().before(DOUBLE_PROCESSORS_SUPPORT_VERSION)) { out.writeFloat((float) count); } else { out.writeDouble(count); @@ -143,7 +143,7 @@ private boolean hasDecimals() { return ((int) count) != Math.ceil(count); } - public boolean isCompatibleWithVersion(Version version) { + public boolean isCompatibleWithVersion(TransportVersion version) { if (version.onOrAfter(FLOAT_PROCESSORS_SUPPORT_VERSION)) { return true; } diff --git a/server/src/main/java/org/elasticsearch/common/util/ArrayUtils.java b/server/src/main/java/org/elasticsearch/common/util/ArrayUtils.java index 37e9fc4049cb..16411d8dd1ff 100644 --- a/server/src/main/java/org/elasticsearch/common/util/ArrayUtils.java +++ b/server/src/main/java/org/elasticsearch/common/util/ArrayUtils.java @@ -84,4 +84,19 @@ public static String[] append(String[] array, String added) { updated[array.length] = added; return updated; } + + /** + * Reverse the {@code length} values on the array starting from {@code offset}. 
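A small worked illustration of the reverseSubArray contract described above (its implementation follows in the patch); the values are arbitrary.

    double[] values = { 1.0, 2.0, 3.0, 4.0, 5.0 };
    ArrayUtils.reverseSubArray(values, 1, 3);
    // values is now { 1.0, 4.0, 3.0, 2.0, 5.0 }: the 3 elements starting at offset 1 are reversed in place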
+ */ + public static void reverseSubArray(double[] array, int offset, int length) { + int start = offset; + int end = offset + length; + while (end > start) { + final double scratch = array[start]; + array[start] = array[end - 1]; + array[end - 1] = scratch; + start++; + end--; + } + } } diff --git a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java index 7fc329148181..edec336c2a02 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java @@ -152,6 +152,13 @@ public boolean hasArray() { public byte[] array() { return array; } + + @Override + public void writeTo(StreamOutput out) throws IOException { + int size = Math.toIntExact(size()) * Byte.BYTES; + out.writeVInt(size); + out.write(array, 0, size); + } } private static class ByteArrayAsIntArrayWrapper extends AbstractArrayWrapper implements IntArray { @@ -261,6 +268,13 @@ public void set(long index, byte[] buf, int offset, int len) { assert index >= 0 && index < size(); System.arraycopy(buf, offset << 3, array, (int) index << 3, len << 3); } + + @Override + public void writeTo(StreamOutput out) throws IOException { + int size = Math.toIntExact(size()) * Long.BYTES; + out.writeVInt(size); + out.write(array, 0, size); + } } private static class ByteArrayAsDoubleArrayWrapper extends AbstractArrayWrapper implements DoubleArray { diff --git a/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java b/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java index 108e5b1a4bfc..72a2fc41a9a1 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigByteArray.java @@ -11,9 +11,12 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamOutput; +import java.io.IOException; import java.util.Arrays; +import static org.elasticsearch.common.util.BigLongArray.writePages; import static org.elasticsearch.common.util.PageCacheRecycler.BYTE_PAGE_SIZE; /** @@ -36,6 +39,11 @@ final class BigByteArray extends AbstractBigArray implements ByteArray { } } + @Override + public void writeTo(StreamOutput out) throws IOException { + writePages(out, Math.toIntExact(size), pages, Byte.BYTES, BYTE_PAGE_SIZE); + } + @Override public byte get(long index) { final int pageIndex = pageIndex(index); diff --git a/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java b/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java index ecfbfc5b9c6b..b2a55973cd44 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigDoubleArray.java @@ -18,6 +18,7 @@ import java.nio.ByteOrder; import java.util.Arrays; +import static org.elasticsearch.common.util.BigLongArray.writePages; import static org.elasticsearch.common.util.PageCacheRecycler.DOUBLE_PAGE_SIZE; /** @@ -26,12 +27,6 @@ */ final class BigDoubleArray extends AbstractBigArray implements DoubleArray { - static { - if (ByteOrder.nativeOrder() != ByteOrder.LITTLE_ENDIAN) { - throw new Error("The deserialization assumes this class is written with little-endian numbers."); - } - } - private static final BigDoubleArray ESTIMATOR = new BigDoubleArray(0, BigArrays.NON_RECYCLING_INSTANCE, false); static final VarHandle 
VH_PLATFORM_NATIVE_DOUBLE = MethodHandles.byteArrayViewVarHandle(double[].class, ByteOrder.nativeOrder()); @@ -134,18 +129,6 @@ public void set(long index, byte[] buf, int offset, int len) { @Override public void writeTo(StreamOutput out) throws IOException { - int size = (int) this.size; - out.writeVInt(size * Double.BYTES); - int lastPageEnd = size % DOUBLE_PAGE_SIZE; - if (lastPageEnd == 0) { - for (byte[] page : pages) { - out.write(page); - } - return; - } - for (int i = 0; i < pages.length - 1; i++) { - out.write(pages[i]); - } - out.write(pages[pages.length - 1], 0, lastPageEnd * Double.BYTES); + writePages(out, Math.toIntExact(size), pages, Double.BYTES, DOUBLE_PAGE_SIZE); } } diff --git a/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java b/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java index e3cf7389f7ed..e053baea9aa5 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigIntArray.java @@ -18,6 +18,7 @@ import java.nio.ByteOrder; import java.util.Arrays; +import static org.elasticsearch.common.util.BigLongArray.writePages; import static org.elasticsearch.common.util.PageCacheRecycler.INT_PAGE_SIZE; /** @@ -25,12 +26,6 @@ * configurable length. */ final class BigIntArray extends AbstractBigArray implements IntArray { - static { - if (ByteOrder.nativeOrder() != ByteOrder.LITTLE_ENDIAN) { - throw new Error("The deserialization assumes this class is written with little-endian ints."); - } - } - private static final BigIntArray ESTIMATOR = new BigIntArray(0, BigArrays.NON_RECYCLING_INSTANCE, false); static final VarHandle VH_PLATFORM_NATIVE_INT = MethodHandles.byteArrayViewVarHandle(int[].class, ByteOrder.nativeOrder()); @@ -49,22 +44,7 @@ final class BigIntArray extends AbstractBigArray implements IntArray { @Override public void writeTo(StreamOutput out) throws IOException { - if (size > Integer.MAX_VALUE / Integer.BYTES) { - throw new IllegalArgumentException(); - } - int intSize = (int) size; - out.writeVInt(intSize * Integer.BYTES); - int lastPageEnd = intSize % INT_PAGE_SIZE; - if (lastPageEnd == 0) { - for (byte[] page : pages) { - out.write(page); - } - return; - } - for (int i = 0; i < pages.length - 1; i++) { - out.write(pages[i]); - } - out.write(pages[pages.length - 1], 0, lastPageEnd * Integer.BYTES); + writePages(out, (int) size, pages, Integer.BYTES, INT_PAGE_SIZE); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java b/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java index 8e7bb52f5a11..1044b5fb78ee 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigLongArray.java @@ -10,7 +10,9 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.io.stream.StreamOutput; +import java.io.IOException; import java.lang.invoke.MethodHandles; import java.lang.invoke.VarHandle; import java.nio.ByteOrder; @@ -126,4 +128,24 @@ public static long estimateRamBytes(final long size) { public void set(long index, byte[] buf, int offset, int len) { set(index, buf, offset, len, pages, 3); } + + @Override + public void writeTo(StreamOutput out) throws IOException { + writePages(out, Math.toIntExact(size), pages, Long.BYTES, LONG_PAGE_SIZE); + } + + static void writePages(StreamOutput out, int size, byte[][] pages, int bytesPerValue, int pageSize) throws 
IOException { + out.writeVInt(size * bytesPerValue); + int lastPageEnd = size % pageSize; + if (lastPageEnd == 0) { + for (byte[] page : pages) { + out.write(page); + } + return; + } + for (int i = 0; i < pages.length - 1; i++) { + out.write(pages[i]); + } + out.write(pages[pages.length - 1], 0, lastPageEnd * bytesPerValue); + } } diff --git a/server/src/main/java/org/elasticsearch/common/util/ByteArray.java b/server/src/main/java/org/elasticsearch/common/util/ByteArray.java index 61c218310396..e3b51ee7d2e3 100644 --- a/server/src/main/java/org/elasticsearch/common/util/ByteArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/ByteArray.java @@ -9,13 +9,20 @@ package org.elasticsearch.common.util; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; +import java.io.IOException; import java.nio.ByteBuffer; /** * Abstraction of an array of byte values. */ -public interface ByteArray extends BigArray { +public interface ByteArray extends BigArray, Writeable { + + static ByteArray readFrom(StreamInput in) throws IOException { + return new ReleasableByteArray(in); + } /** * Get an element given its index. diff --git a/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java b/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java index 79a177b48869..1664b4690bc7 100644 --- a/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java +++ b/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java @@ -9,7 +9,6 @@ package org.elasticsearch.common.util; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.Iterators; import java.nio.file.Path; import java.util.AbstractList; @@ -21,7 +20,6 @@ import java.util.IdentityHashMap; import java.util.List; import java.util.ListIterator; -import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.RandomAccess; @@ -100,44 +98,56 @@ public static int[] toArray(Collection ints) { * @param messageHint A string to be included in the exception message if the call fails, to provide * more context to the handler of the exception */ - public static void ensureNoSelfReferences(Object value, String messageHint) { - Iterable it = convert(value); - if (it != null) { - ensureNoSelfReferences(it, value, Collections.newSetFromMap(new IdentityHashMap<>()), messageHint); - } + public static void ensureNoSelfReferences(final Object value, final String messageHint) { + ensureNoSelfReferences(value, Collections.newSetFromMap(new IdentityHashMap<>()), messageHint); } - @SuppressWarnings("unchecked") - private static Iterable convert(Object value) { - if (value == null) { - return null; - } - if (value instanceof Map map) { - return () -> Iterators.concat(map.keySet().iterator(), map.values().iterator()); - } else if ((value instanceof Iterable) && (value instanceof Path == false)) { - return (Iterable) value; + private static void ensureNoSelfReferences(final Object value, final Set ancestors, final String messageHint) { + // these instanceof checks are a bit on the ugly side, but it's important for performance that we have + // a separate dispatch point for Maps versus for Iterables. a polymorphic version of this code would + // be prettier, but it would also likely be quite a bit slower. this is a hot path for ingest pipelines, + // and performance here is important. 
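A minimal, hypothetical illustration of the self-reference check whose reworked dispatch code follows; the map below is illustrative only.

    Map<String, Object> doc = new HashMap<>();
    doc.put("field", "value");
    doc.put("self", doc);   // the map now contains itself
    // CollectionUtils.ensureNoSelfReferences(doc, "test") is expected to throw
    // IllegalArgumentException("Iterable object is self-referencing itself (test)")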
+ if (value == null || value instanceof String || value instanceof Number || value instanceof Boolean) { + // noop + } else if (value instanceof Map m && m.isEmpty() == false) { + ensureNoSelfReferences(m, ancestors, messageHint); + } else if ((value instanceof Iterable i) && (value instanceof Path == false)) { + ensureNoSelfReferences(i, i, ancestors, messageHint); } else if (value instanceof Object[]) { - return Arrays.asList((Object[]) value); - } else { - return null; + // note: the iterable and reference arguments are different + ensureNoSelfReferences(Arrays.asList((Object[]) value), value, ancestors, messageHint); } } + private static void ensureNoSelfReferences(final Map reference, final Set ancestors, final String messageHint) { + addToAncestorsOrThrow(reference, ancestors, messageHint); + for (Map.Entry e : reference.entrySet()) { + ensureNoSelfReferences(e.getKey(), ancestors, messageHint); + ensureNoSelfReferences(e.getValue(), ancestors, messageHint); + } + ancestors.remove(reference); + } + private static void ensureNoSelfReferences( - final Iterable value, - Object originalReference, + final Iterable iterable, + final Object reference, final Set ancestors, - String messageHint + final String messageHint ) { - if (value != null) { - if (ancestors.add(originalReference) == false) { - String suffix = Strings.isNullOrEmpty(messageHint) ? "" : String.format(Locale.ROOT, " (%s)", messageHint); - throw new IllegalArgumentException("Iterable object is self-referencing itself" + suffix); - } - for (Object o : value) { - ensureNoSelfReferences(convert(o), o, ancestors, messageHint); + addToAncestorsOrThrow(reference, ancestors, messageHint); + for (Object o : iterable) { + ensureNoSelfReferences(o, ancestors, messageHint); + } + ancestors.remove(reference); + } + + private static void addToAncestorsOrThrow(Object reference, Set ancestors, String messageHint) { + if (ancestors.add(reference) == false) { + StringBuilder sb = new StringBuilder("Iterable object is self-referencing itself"); + if (Strings.hasLength(messageHint)) { + sb.append(" (").append(messageHint).append(")"); } - ancestors.remove(originalReference); + throw new IllegalArgumentException(sb.toString()); } } @@ -178,7 +188,7 @@ public static ArrayList iterableAsArrayList(Iterable element throw new NullPointerException("elements"); } if (elements instanceof Collection) { - return new ArrayList<>((Collection) elements); + return new ArrayList<>((Collection) elements); } else { ArrayList list = new ArrayList<>(); for (E element : elements) { diff --git a/server/src/main/java/org/elasticsearch/common/util/CopyOnFirstWriteMap.java b/server/src/main/java/org/elasticsearch/common/util/CopyOnFirstWriteMap.java new file mode 100644 index 000000000000..08d86c143fc1 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/util/CopyOnFirstWriteMap.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.common.util; + +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +/** + * This map is designed to be constructed from an immutable map and be copied only if a (rare) mutation operation occurs. + * It should be converted back to an immutable map using `org.elasticsearch.common.util.LazyCopyOnWriteMap#toImmutableMap()`. + */ +public class CopyOnFirstWriteMap implements Map { + + private Map source; + private boolean wasCopied = false; + + public CopyOnFirstWriteMap(Map source) { + this.source = source; + } + + private Map getForUpdate() { + if (wasCopied == false) { + source = new HashMap<>(source); + wasCopied = true; + } + return source; + } + + private Map getForRead() { + return source; + } + + public Map toImmutableMap() { + return Map.copyOf(getForRead()); + } + + @Override + public int size() { + return getForRead().size(); + } + + @Override + public boolean isEmpty() { + return getForRead().isEmpty(); + } + + @Override + public boolean containsKey(Object key) { + return getForRead().containsKey(key); + } + + @Override + public boolean containsValue(Object value) { + return getForRead().containsValue(value); + } + + @Override + public V get(Object key) { + return getForRead().get(key); + } + + @Override + public V put(K key, V value) { + return getForUpdate().put(key, value); + } + + @Override + public V remove(Object key) { + return getForUpdate().remove(key); + } + + @Override + public void putAll(Map m) { + getForUpdate().putAll(m); + } + + @Override + public void clear() { + getForUpdate().clear(); + } + + @Override + public Set keySet() { + return Collections.unmodifiableSet(getForRead().keySet()); + } + + @Override + public Collection values() { + return Collections.unmodifiableCollection(getForRead().values()); + } + + @Override + public Set> entrySet() { + return Collections.unmodifiableSet(getForRead().entrySet()); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/util/LazyMap.java b/server/src/main/java/org/elasticsearch/common/util/LazyMap.java deleted file mode 100644 index 7b6be425985e..000000000000 --- a/server/src/main/java/org/elasticsearch/common/util/LazyMap.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
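A minimal, hypothetical sketch of the copy-on-first-write behaviour of the CopyOnFirstWriteMap class added above; the keys and values are placeholders.

    Map<String, String> source = Map.of("a", "1", "b", "2");
    CopyOnFirstWriteMap<String, String> cow = new CopyOnFirstWriteMap<>(source);
    String a = cow.get("a");              // reads are served from the original map, no copy yet
    cow.put("c", "3");                    // the first mutation copies 'source' into a private HashMap
    Map<String, String> result = cow.toImmutableMap();   // {a=1, b=2, c=3}; 'source' itself is unchanged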
- */ - -package org.elasticsearch.common.util; - -import java.util.Collection; -import java.util.Collections; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.function.BiConsumer; -import java.util.function.BiFunction; -import java.util.function.Function; -import java.util.function.Supplier; - -public class LazyMap implements Map { - - private final Supplier> mapSupplier; - private volatile Map map; - - public LazyMap(Supplier> mapSupplier) { - this.mapSupplier = mapSupplier; - } - - private Map get() { - if (map == null) { - synchronized (this) { - if (map == null) { - map = Objects.requireNonNullElse(mapSupplier.get(), Collections.emptyMap()); - } - } - } - return map; - } - - @Override - public int size() { - return get().size(); - } - - @Override - public boolean isEmpty() { - return get().isEmpty(); - } - - @Override - public boolean containsKey(Object key) { - return get().containsKey(key); - } - - @Override - public boolean containsValue(Object value) { - return get().containsValue(value); - } - - @Override - public V get(Object key) { - return get().get(key); - } - - @Override - public V put(K key, V value) { - return get().put(key, value); - } - - @Override - public V remove(Object key) { - return get().remove(key); - } - - @Override - public void putAll(Map m) { - get().putAll(m); - } - - @Override - public void clear() { - get().clear(); - } - - @Override - public Set keySet() { - return get().keySet(); - } - - @Override - public Collection values() { - return get().values(); - } - - @Override - public Set> entrySet() { - return get().entrySet(); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - return get().equals(o); - } - - @Override - public int hashCode() { - return get().hashCode(); - } - - @Override - public String toString() { - return get().toString(); - } - - // Override default methods in Map - @Override - public V getOrDefault(Object k, V defaultValue) { - return get().getOrDefault(k, defaultValue); - } - - @Override - public void forEach(BiConsumer action) { - get().forEach(action); - } - - @Override - public void replaceAll(BiFunction function) { - get().replaceAll(function); - } - - @Override - public V putIfAbsent(K key, V value) { - return get().putIfAbsent(key, value); - } - - @Override - public boolean remove(Object key, Object value) { - return get().remove(key, value); - } - - @Override - public boolean replace(K key, V oldValue, V newValue) { - return get().replace(key, oldValue, newValue); - } - - @Override - public V replace(K key, V value) { - return get().replace(key, value); - } - - @Override - public V computeIfAbsent(K key, Function mappingFunction) { - return get().computeIfAbsent(key, mappingFunction); - } - - @Override - public V computeIfPresent(K key, BiFunction remappingFunction) { - return get().computeIfPresent(key, remappingFunction); - } - - @Override - public V compute(K key, BiFunction remappingFunction) { - return get().compute(key, remappingFunction); - } - - @Override - public V merge(K key, V value, BiFunction remappingFunction) { - return get().merge(key, value, remappingFunction); - } -} diff --git a/server/src/main/java/org/elasticsearch/common/util/LongArray.java b/server/src/main/java/org/elasticsearch/common/util/LongArray.java index 984f1df48701..bd293a135640 100644 --- a/server/src/main/java/org/elasticsearch/common/util/LongArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/LongArray.java @@ -8,10 +8,19 @@ package 
org.elasticsearch.common.util; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; + /** * Abstraction of an array of long values. */ -public interface LongArray extends BigArray { +public interface LongArray extends BigArray, Writeable { + + static LongArray readFrom(StreamInput in) throws IOException { + return new ReleasableLongArray(in); + } /** * Get an element given its index. diff --git a/server/src/main/java/org/elasticsearch/common/util/Maps.java b/server/src/main/java/org/elasticsearch/common/util/Maps.java index a0ff346da0d9..276c36f8ce95 100644 --- a/server/src/main/java/org/elasticsearch/common/util/Maps.java +++ b/server/src/main/java/org/elasticsearch/common/util/Maps.java @@ -295,7 +295,14 @@ static int capacity(int expectedSize) { * This method creates a copy of the {@code source} map using {@code copyValueFunction} to create a defensive copy of each value. */ public static Map copyOf(Map source, Function copyValueFunction) { - var copy = Maps.newHashMapWithExpectedSize(source.size()); + return transformValues(source, copyValueFunction); + } + + /** + * Copy a map and transform it values using supplied function + */ + public static Map transformValues(Map source, Function copyValueFunction) { + var copy = Maps.newHashMapWithExpectedSize(source.size()); for (var entry : source.entrySet()) { copy.put(entry.getKey(), copyValueFunction.apply(entry.getValue())); } diff --git a/server/src/main/java/org/elasticsearch/common/util/ReleasableByteArray.java b/server/src/main/java/org/elasticsearch/common/util/ReleasableByteArray.java new file mode 100644 index 000000000000..0102195f4e80 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/util/ReleasableByteArray.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.common.util; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.bytes.ReleasableBytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +import static org.elasticsearch.common.util.BigArrays.indexIsInt; + +public class ReleasableByteArray implements ByteArray { + + private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(ReleasableByteArray.class); + + private final ReleasableBytesReference ref; + + ReleasableByteArray(StreamInput in) throws IOException { + this.ref = in.readReleasableBytesReference(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBytesReference(ref); + } + + @Override + public long size() { + return ref.length() / Byte.BYTES; + } + + @Override + public byte get(long index) { + assert indexIsInt(index); + return ref.get((int) index); + } + + @Override + public boolean get(long index, int len, BytesRef ref) { + assert indexIsInt(index); + BytesReference sliced = this.ref.slice((int) index, len); + if (sliced.length() != 0) { + ref.offset = sliced.arrayOffset(); + ref.length = sliced.length(); + ref.bytes = sliced.array(); + return true; + } else { + return false; + } + } + + @Override + public byte set(long index, byte value) { + throw new UnsupportedOperationException(); + } + + @Override + public void set(long index, byte[] buf, int offset, int len) { + throw new UnsupportedOperationException(); + } + + @Override + public void fill(long fromIndex, long toIndex, byte value) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean hasArray() { + return ref.hasArray(); + } + + @Override + public byte[] array() { + // The assumption of this method is that the returned array has valid entries starting from slot 0 and + // this isn't case when just returning the array from ReleasableBytesReference#array(). + // The interface that this class implements should have something like an arrayOffset() method, + // so that callers know from what array offset the first actual byte starts. + throw new UnsupportedOperationException(); + } + + @Override + public long ramBytesUsed() { + /* + * If we return the size of the buffer that we've sliced + * we're likely to double count things. + */ + return SHALLOW_SIZE; + } + + @Override + public void close() { + ref.decRef(); + } + +} diff --git a/server/src/main/java/org/elasticsearch/common/util/ReleasableLongArray.java b/server/src/main/java/org/elasticsearch/common/util/ReleasableLongArray.java new file mode 100644 index 000000000000..44764ea1e171 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/util/ReleasableLongArray.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.common.util; + +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.bytes.ReleasableBytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public class ReleasableLongArray implements LongArray { + + private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(ReleasableLongArray.class); + + private final ReleasableBytesReference ref; + + ReleasableLongArray(StreamInput in) throws IOException { + this.ref = in.readReleasableBytesReference(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBytesReference(ref); + } + + @Override + public long size() { + return ref.length() / Long.BYTES; + } + + @Override + public long get(long index) { + if (index > Integer.MAX_VALUE / Long.BYTES) { + // We can't serialize messages longer than 2gb anyway + throw new ArrayIndexOutOfBoundsException(); + } + return ref.getLongLE((int) index * Long.BYTES); + } + + @Override + public long set(long index, long value) { + throw new UnsupportedOperationException(); + } + + @Override + public long increment(long index, long inc) { + throw new UnsupportedOperationException(); + } + + @Override + public void fill(long fromIndex, long toIndex, long value) { + throw new UnsupportedOperationException(); + } + + @Override + public void set(long index, byte[] buf, int offset, int len) { + throw new UnsupportedOperationException(); + } + + @Override + public long ramBytesUsed() { + /* + * If we return the size of the buffer that we've sliced + * we're likely to double count things. + */ + return SHALLOW_SIZE; + } + + @Override + public void close() { + ref.decRef(); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/AbstractThrottledTaskRunner.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/AbstractThrottledTaskRunner.java new file mode 100644 index 000000000000..ea37dad5ba21 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/AbstractThrottledTaskRunner.java @@ -0,0 +1,158 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.util.concurrent; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.core.Strings; + +import java.util.Queue; +import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * {@link AbstractThrottledTaskRunner} runs the enqueued tasks using the given executor, limiting the number of tasks that are submitted to + * the executor at once. + */ +public class AbstractThrottledTaskRunner> { + private static final Logger logger = LogManager.getLogger(AbstractThrottledTaskRunner.class); + + private final String taskRunnerName; + // The max number of tasks that this runner will schedule to concurrently run on the executor. 
+ private final int maxRunningTasks; + // As we fork off dequeued tasks to the given executor, technically the following counter represents + // the number of the concurrent pollAndSpawn calls currently checking the queue for a task to run. This + // doesn't necessarily correspond to currently running tasks, since a pollAndSpawn could return without + // actually running a task when the queue is empty. + private final AtomicInteger runningTasks = new AtomicInteger(); + private final Queue tasks; + private final Executor executor; + + public AbstractThrottledTaskRunner(final String name, final int maxRunningTasks, final Executor executor, final Queue taskQueue) { + assert maxRunningTasks > 0; + this.taskRunnerName = name; + this.maxRunningTasks = maxRunningTasks; + this.executor = executor; + this.tasks = taskQueue; + } + + /** + * Submits a task for execution. If there are fewer than {@code maxRunningTasks} tasks currently running then this task is immediately + * submitted to the executor. Otherwise this task is enqueued and will be submitted to the executor in turn on completion of some other + * task. + * + * Tasks are executed via their {@link ActionListener#onResponse} method, receiving a {@link Releasable} which must be closed on + * completion of the task. Task which are rejected from their executor are notified via their {@link ActionListener#onFailure} method. + * Neither of these methods may themselves throw exceptions. + */ + public void enqueueTask(final T task) { + logger.trace("[{}] enqueuing task {}", taskRunnerName, task); + tasks.add(task); + // Try to run a task since now there is at least one in the queue. If the maxRunningTasks is + // reached, the task is just enqueued. + pollAndSpawn(); + } + + /** + * Allows certain tasks to force their execution, bypassing the queue-length limit on the executor. See also {@link + * AbstractRunnable#isForceExecution()}. + */ + protected boolean isForceExecution(@SuppressWarnings("unused") /* TODO test this */ T task) { + return false; + } + + private void pollAndSpawn() { + // A pollAndSpawn attempts to run a new task. There could be many concurrent pollAndSpawn calls competing + // to get a "free slot", since we attempt to run a new task on every enqueueTask call and every time an + // existing task is finished. + while (incrementRunningTasks()) { + T task = tasks.poll(); + if (task == null) { + logger.trace("[{}] task queue is empty", taskRunnerName); + // We have taken up a "free slot", but there are no tasks in the queue! This could happen each time a worker + // sees an empty queue after running a task. Decrement to give competing pollAndSpawn calls a chance! + int decremented = runningTasks.decrementAndGet(); + assert decremented >= 0; + // We might have blocked all competing pollAndSpawn calls. This could happen for example when + // maxRunningTasks=1 and a task got enqueued just after checking the queue but before decrementing. + // To be sure, return only if the queue is still empty. If the queue is not empty, this might be the + // only pollAndSpawn call in progress, and returning without peeking would risk ending up with a + // non-empty queue and no workers! 
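Before the dequeue loop continues below, a minimal usage sketch of this runner as a whole, based only on the constructor and javadoc in this hunk: a task is any ActionListener<Releasable>, it must close the Releasable it receives in onResponse, and executor rejections arrive via onFailure. The LoggingTask name, the same-thread executor and the queue choice are illustrative assumptions, not part of this change.

    class LoggingTask implements ActionListener<Releasable> {
        @Override
        public void onResponse(Releasable releasable) {
            try (releasable) {                 // closing the slot lets the next queued task run
                System.out.println("task ran");
            }
        }

        @Override
        public void onFailure(Exception e) {   // only called if the executor rejects the task
            e.printStackTrace();
        }
    }

    var runner = new AbstractThrottledTaskRunner<LoggingTask>(
        "example", 2, Runnable::run, ConcurrentCollections.newBlockingQueue());
    runner.enqueueTask(new LoggingTask());     // at most 2 tasks are submitted to the executor at once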
+ if (tasks.peek() == null) break; + } else { + final boolean isForceExecution = isForceExecution(task); + executor.execute(new AbstractRunnable() { + private boolean rejected; // need not be volatile - if we're rejected then that happens-before calling onAfter + + private final Releasable releasable = Releasables.releaseOnce(() -> { + // To avoid missing to run tasks that are enqueued and waiting, we check the queue again once running + // a task is finished. + int decremented = runningTasks.decrementAndGet(); + assert decremented >= 0; + + if (rejected == false) { + pollAndSpawn(); + } + }); + + @Override + public boolean isForceExecution() { + return isForceExecution; + } + + @Override + public void onRejection(Exception e) { + logger.trace("[{}] task {} rejected", taskRunnerName, task); + rejected = true; + try { + task.onFailure(e); + } finally { + releasable.close(); + } + } + + @Override + public void onFailure(Exception e) { + // should not happen + logger.error(() -> Strings.format("[%s] task %s failed", taskRunnerName, task), e); + assert false : e; + task.onFailure(e); + } + + @Override + protected void doRun() { + logger.trace("[{}] running task {}", taskRunnerName, task); + task.onResponse(releasable); + } + + @Override + public String toString() { + return task.toString(); + } + }); + } + } + } + + // Each worker thread that runs a task, first needs to get a "free slot" in order to respect maxRunningTasks. + private boolean incrementRunningTasks() { + int preUpdateValue = runningTasks.getAndAccumulate(maxRunningTasks, (v, maxRunning) -> v < maxRunning ? v + 1 : v); + assert preUpdateValue <= maxRunningTasks; + return preUpdateValue < maxRunningTasks; + } + + // exposed for testing + int runningTasks() { + return runningTasks.get(); + } + +} diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/CountDown.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/CountDown.java index bbbea0f31d8d..ba7b6fadb2ae 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/CountDown.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/CountDown.java @@ -12,51 +12,49 @@ import java.util.concurrent.atomic.AtomicInteger; /** - * A simple thread safe count-down class that in contrast to a {@link CountDownLatch} - * never blocks. This class is useful if a certain action has to wait for N concurrent - * tasks to return or a timeout to occur in order to proceed. + * A simple thread-safe count-down class that does not block, unlike a {@link CountDownLatch}. This class is useful if an action must wait + * for N concurrent tasks to succeed, or some other task to fail, in order to proceed. When called enough times, exactly one invocation of + * {@link #countDown()} or {@link #fastForward()} will return {@code true}. */ public final class CountDown { private final AtomicInteger countDown; - private final int originalCount; public CountDown(int count) { + // A count of zero was permitted in older versions, but is trappy (requiring explicit special handling) because countDown() and + // fastForward() would never return true. This case is now forbidden in tests, forcing callers to handle the special case up-front, + // but the older behaviour is preserved when assertions are disabled. See #92196. + // TODO drop this leniency once we are confident nothing relies on the older behaviour any more. 
+ assert count > 0 : "count must be positive"; if (count < 0) { throw new IllegalArgumentException("count must be greater or equal to 0 but was: " + count); } - this.originalCount = count; this.countDown = new AtomicInteger(count); } + private static int assertValidCount(int count) { + assert count >= 0 : count; + return count; + } + /** - * Decrements the count-down and returns true iff this call - * reached zero otherwise false + * Decrements the count and returns {@code true} if and only if the count reached zero with this call. */ public boolean countDown() { - assert originalCount > 0; - return countDown.getAndUpdate(current -> { - assert current >= 0; - return current == 0 ? 0 : current - 1; - }) == 1; + return countDown.getAndUpdate(current -> assertValidCount(current) == 0 ? 0 : current - 1) == 1; } /** - * Fast forwards the count-down to zero and returns true iff - * the count down reached zero with this fast forward call otherwise - * false + * Fast-forwards the count to zero and returns {@code true} if and only if the count reached zero with this call. */ public boolean fastForward() { - assert originalCount > 0; - assert countDown.get() >= 0; - return countDown.getAndSet(0) > 0; + return assertValidCount(countDown.getAndSet(0)) > 0; } /** - * Returns true iff the count-down has reached zero. Otherwise false + * Returns {@code true} if and only if the count has reached zero. */ public boolean isCountedDown() { - assert countDown.get() >= 0; - return countDown.get() == 0; + return assertValidCount(countDown.get()) == 0; } } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ListenableFuture.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ListenableFuture.java index a378d4020112..f2788d278c81 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ListenableFuture.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ListenableFuture.java @@ -105,39 +105,31 @@ protected void done(boolean ignored) { } private void notifyListenerDirectly(ActionListener listener) { - try { - // call get in a non-blocking fashion as we could be on a network thread - // or another thread like the scheduler, which we should never block! - assert done; - V value = FutureUtils.get(ListenableFuture.this, 0L, TimeUnit.NANOSECONDS); - listener.onResponse(value); - } catch (Exception e) { - listener.onFailure(e); - } + // call get in a non-blocking fashion as we could be on a network thread + // or another thread like the scheduler, which we should never block! 
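To make the CountDown contract above concrete, a short sketch of the documented behaviour: with a positive count, exactly one call to countDown() or fastForward() observes the transition to zero, and every later call returns false.

    CountDown countDown = new CountDown(3);
    assert countDown.countDown() == false;   // 3 -> 2
    assert countDown.countDown() == false;   // 2 -> 1
    assert countDown.fastForward();          // 1 -> 0, this is the single call that returns true
    assert countDown.countDown() == false;   // already at zero
    assert countDown.isCountedDown();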
+ assert done; + ActionListener.completeWith(listener, () -> FutureUtils.get(ListenableFuture.this, 0L, TimeUnit.NANOSECONDS)); } private void notifyListener(ActionListener listener, ExecutorService executorService) { - try { - executorService.execute(new Runnable() { - @Override - public void run() { - notifyListenerDirectly(listener); - } + ActionListener.run(listener, l -> executorService.execute(new Runnable() { + @Override + public void run() { + notifyListenerDirectly(l); + } - @Override - public String toString() { - return "ListenableFuture notification"; - } - }); - } catch (Exception e) { - listener.onFailure(e); - } + @Override + public String toString() { + return "ListenableFuture notification"; + } + })); } @Override public void onResponse(V v) { final boolean set = set(v); if (set == false) { + assert false; throw new IllegalStateException("did not set value, value or exception already set?"); } } @@ -146,6 +138,7 @@ public void onResponse(V v) { public void onFailure(Exception e) { final boolean set = setException(e); if (set == false) { + assert false; throw new IllegalStateException("did not set exception, value already set or exception already set?"); } } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedThrottledTaskRunner.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedThrottledTaskRunner.java index 9bc7617a4c79..96d6b0ed9471 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedThrottledTaskRunner.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/PrioritizedThrottledTaskRunner.java @@ -8,100 +8,82 @@ package org.elasticsearch.common.util.concurrent; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.Releasable; -import java.util.concurrent.BlockingQueue; import java.util.concurrent.Executor; import java.util.concurrent.PriorityBlockingQueue; -import java.util.concurrent.atomic.AtomicInteger; /** * {@link PrioritizedThrottledTaskRunner} performs the enqueued tasks in the order dictated by the * natural ordering of the tasks, limiting the max number of concurrently running tasks. Each new task * that is dequeued to be run, is forked off to the given executor. */ -public class PrioritizedThrottledTaskRunner & Runnable> { - private static final Logger logger = LogManager.getLogger(PrioritizedThrottledTaskRunner.class); - - private final String taskRunnerName; - // The max number of tasks that this runner will schedule to concurrently run on the executor. - private final int maxRunningTasks; - // As we fork off dequeued tasks to the given executor, technically the following counter represents - // the number of the concurrent pollAndSpawn calls currently checking the queue for a task to run. This - // doesn't necessarily correspond to currently running tasks, since a pollAndSpawn could return without - // actually running a task when the queue is empty. 
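The ListenableFuture hunk above replaces hand-written try/catch blocks with ActionListener.completeWith, which routes the supplier's result or failure to the listener. Ignoring the exact generics, the call expands roughly as follows (a sketch, not the library source):

    // approximate expansion of ActionListener.completeWith(listener, supplier)
    V value;
    try {
        value = supplier.get();      // here: the non-blocking FutureUtils.get(...) call
    } catch (Exception e) {
        listener.onFailure(e);
        return;
    }
    listener.onResponse(value);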
- private final AtomicInteger runningTasks = new AtomicInteger(); - private final BlockingQueue tasks = new PriorityBlockingQueue<>(); - private final Executor executor; +public class PrioritizedThrottledTaskRunner> { - public PrioritizedThrottledTaskRunner(final String name, final int maxRunningTasks, final Executor executor) { - assert maxRunningTasks > 0; - this.taskRunnerName = name; - this.maxRunningTasks = maxRunningTasks; - this.executor = executor; - } + private final AbstractThrottledTaskRunner> runner; + private final PriorityBlockingQueue> queue; - public void enqueueTask(final T task) { - logger.trace("[{}] enqueuing task {}", taskRunnerName, task); - tasks.add(task); - // Try to run a task since now there is at least one in the queue. If the maxRunningTasks is - // reached, the task is just enqueued. - pollAndSpawn(); - } + private static class TaskWrapper> + implements + ActionListener, + Comparable> { + + private final T task; + + TaskWrapper(T task) { + this.task = task; + } + + @Override + public int compareTo(TaskWrapper o) { + return task.compareTo(o.task); + } + + @Override + public String toString() { + return task.toString(); + } - // visible for testing - protected void pollAndSpawn() { - // A pollAndSpawn attempts to run a new task. There could be many concurrent pollAndSpawn calls competing - // to get a "free slot", since we attempt to run a new task on every enqueueTask call and every time an - // existing task is finished. - while (incrementRunningTasks()) { - T task = tasks.poll(); - if (task == null) { - logger.trace("[{}] task queue is empty", taskRunnerName); - // We have taken up a "free slot", but there are no tasks in the queue! This could happen each time a worker - // sees an empty queue after running a task. Decrement to give competing pollAndSpawn calls a chance! - int decremented = runningTasks.decrementAndGet(); - assert decremented >= 0; - // We might have blocked all competing pollAndSpawn calls. This could happen for example when - // maxRunningTasks=1 and a task got enqueued just after checking the queue but before decrementing. - // To be sure, return only if the queue is still empty. If the queue is not empty, this might be the - // only pollAndSpawn call in progress, and returning without peeking would risk ending up with a - // non-empty queue and no workers! - if (tasks.peek() == null) break; - } else { - executor.execute(() -> runTask(task)); + @Override + public void onResponse(Releasable releasable) { + try (releasable) { + task.run(); } } + + @Override + public void onFailure(Exception e) { + assert e instanceof EsRejectedExecutionException : e; + try { + task.onRejection(e); + } finally { + task.onAfter(); + } + } + } + + public PrioritizedThrottledTaskRunner(final String name, final int maxRunningTasks, final Executor executor) { + this.queue = new PriorityBlockingQueue<>(); + this.runner = new AbstractThrottledTaskRunner<>(name, maxRunningTasks, executor, queue); } - // Each worker thread that runs a task, first needs to get a "free slot" in order to respect maxRunningTasks. - private boolean incrementRunningTasks() { - int preUpdateValue = runningTasks.getAndAccumulate(maxRunningTasks, (v, maxRunning) -> v < maxRunning ? v + 1 : v); - assert preUpdateValue <= maxRunningTasks; - return preUpdateValue < maxRunningTasks; + /** + * Submits a task for execution. If there are fewer than {@code maxRunningTasks} tasks currently running then this task is immediately + * submitted to the executor. 
Otherwise this task is enqueued and will be submitted to the executor in turn on completion of some other + * task. + */ + public void enqueueTask(final T task) { + runner.enqueueTask(new TaskWrapper<>(task)); } // Only use for testing public int runningTasks() { - return runningTasks.get(); + return runner.runningTasks(); } // Only use for testing public int queueSize() { - return tasks.size(); - } - - private void runTask(final T task) { - try { - logger.trace("[{}] running task {}", taskRunnerName, task); - task.run(); - } finally { - // To avoid missing to run tasks that are enqueued and waiting, we check the queue again once running - // a task is finished. - int decremented = runningTasks.decrementAndGet(); - assert decremented >= 0; - pollAndSpawn(); - } + return queue.size(); } } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/RunOnce.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/RunOnce.java index 0b9334a03c44..e14f2d6463fa 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/RunOnce.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/RunOnce.java @@ -8,25 +8,24 @@ package org.elasticsearch.common.util.concurrent; import java.util.Objects; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; /** - * Runnable that can only be run one time. + * Runnable that prevents running its delegate more than once. */ public class RunOnce implements Runnable { - private final Runnable delegate; - private final AtomicBoolean hasRun; + private final AtomicReference delegateRef; public RunOnce(final Runnable delegate) { - this.delegate = Objects.requireNonNull(delegate); - this.hasRun = new AtomicBoolean(false); + delegateRef = new AtomicReference<>(Objects.requireNonNull(delegate)); } @Override public void run() { - if (hasRun.compareAndSet(false, true)) { - delegate.run(); + var acquired = delegateRef.getAndSet(null); + if (acquired != null) { + acquired.run(); } } @@ -34,6 +33,11 @@ public void run() { * {@code true} if the {@link RunOnce} has been executed once. */ public boolean hasRun() { - return hasRun.get(); + return delegateRef.get() == null; + } + + @Override + public String toString() { + return "RunOnce[" + delegateRef.get() + "]"; } } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index 16d11a3cbc3c..ef004bb5d9e5 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -312,11 +312,29 @@ public StoredContext newStoredContext() { /** * Just like {@link #stashContext()} but no default context is set. Instead, the {@code transientHeadersToClear} argument can be used - * to clear specific transient headers in the new context. All headers (with the possible exception of {@code responseHeaders}) are - * restored by closing the returned {@link StoredContext}. - * + * to clear specific transient headers in the new context and {@code requestHeadersToClear} can be used to clear specific request + * headers. All original headers (without the {@code responseHeaders}) are restored by closing the returned {@link StoredContext}. 
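As a usage sketch for the two new overloads (the threadContext variable, the header names and doWorkWithReducedContext() are hypothetical): the cleared headers are absent for the lifetime of the stored context and the original headers come back on close; newStoredContextPreservingResponseHeaders behaves the same but additionally keeps any response headers added in the meantime.

    try (ThreadContext.StoredContext restore =
             threadContext.newStoredContext(Set.of("my-transient-key"), Set.of("my-request-header"))) {
        // both headers are absent from the context inside this block
        doWorkWithReducedContext();
    } // close() restores the original headers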
+ */ + public StoredContext newStoredContext(Collection transientHeadersToClear, Collection requestHeadersToClear) { + return newStoredContext(false, transientHeadersToClear, requestHeadersToClear); + } + + /** + * Just like {@link #newStoredContext(Collection, Collection)} but all headers are restored to original, + * except of {@code responseHeaders} which will be preserved from the restore thread. */ - public StoredContext newStoredContext(Collection transientHeadersToClear) { + public StoredContext newStoredContextPreservingResponseHeaders( + Collection transientHeadersToClear, + Collection requestHeadersToClear + ) { + return newStoredContext(true, transientHeadersToClear, requestHeadersToClear); + } + + private StoredContext newStoredContext( + boolean preserveResponseHeaders, + Collection transientHeadersToClear, + Collection requestHeadersToClear + ) { final ThreadContextStruct originalContext = threadLocal.get(); // clear specific transient headers from the current context Map newTransientHeaders = null; @@ -328,18 +346,34 @@ public StoredContext newStoredContext(Collection transientHeadersToClear newTransientHeaders.remove(transientHeaderToClear); } } - // this is the context when this method returns - if (newTransientHeaders != null) { + Map newRequestHeaders = null; + for (String requestHeaderToClear : requestHeadersToClear) { + if (originalContext.requestHeaders.containsKey(requestHeaderToClear)) { + if (newRequestHeaders == null) { + newRequestHeaders = new HashMap<>(originalContext.requestHeaders); + } + newRequestHeaders.remove(requestHeaderToClear); + } + } + if (newTransientHeaders != null || newRequestHeaders != null) { ThreadContextStruct threadContextStruct = new ThreadContextStruct( - originalContext.requestHeaders, + newRequestHeaders != null ? newRequestHeaders : originalContext.requestHeaders, originalContext.responseHeaders, - newTransientHeaders, + newTransientHeaders != null ? newTransientHeaders : originalContext.transientHeaders, originalContext.isSystemContext, originalContext.warningHeadersSize ); threadLocal.set(threadContextStruct); } - return storedOriginalContext(originalContext); + // this is the context when this method returns + final ThreadContextStruct newContext = threadLocal.get(); + return () -> { + if (preserveResponseHeaders && threadLocal.get() != newContext) { + threadLocal.set(originalContext.putResponseHeaders(threadLocal.get().responseHeaders)); + } else { + threadLocal.set(originalContext); + } + }; } /** @@ -510,6 +544,13 @@ public T getTransient(String key) { return (T) threadLocal.get().transientHeaders.get(key); } + /** + * Returns unmodifiable copy of all transient headers. + */ + public Map getTransientHeaders() { + return Collections.unmodifiableMap(threadLocal.get().transientHeaders); + } + /** * Add the {@code value} for the specified {@code key} Any duplicate {@code value} is ignored. * diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThrottledTaskRunner.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThrottledTaskRunner.java new file mode 100644 index 000000000000..674e58ee766d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThrottledTaskRunner.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.util.concurrent; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.Releasable; + +import java.util.concurrent.Executor; + +public class ThrottledTaskRunner extends AbstractThrottledTaskRunner> { + // a simple AbstractThrottledTaskRunner which fixes the task type and uses a regular FIFO blocking queue. + public ThrottledTaskRunner(String name, int maxRunningTasks, Executor executor) { + super(name, maxRunningTasks, executor, ConcurrentCollections.newBlockingQueue()); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/util/set/Sets.java b/server/src/main/java/org/elasticsearch/common/util/set/Sets.java index 9fa9f6adc34f..207bc844e522 100644 --- a/server/src/main/java/org/elasticsearch/common/util/set/Sets.java +++ b/server/src/main/java/org/elasticsearch/common/util/set/Sets.java @@ -159,6 +159,14 @@ public static Set union(Set left, Set right) { return union; } + /** + * The intersection of two sets. Namely, the resulting set contains all the elements that are in both sets. + * Neither input is mutated by this operation, an entirely new set is returned. + * + * @param set1 the first set + * @param set2 the second set + * @return the unmodifiable intersection of the two sets + */ public static Set intersection(Set set1, Set set2) { Objects.requireNonNull(set1); Objects.requireNonNull(set2); @@ -171,7 +179,21 @@ public static Set intersection(Set set1, Set set2) { left = set2; right = set1; } - return left.stream().filter(right::contains).collect(Collectors.toSet()); + + final Set empty = Set.of(); + Set result = empty; + for (T t : left) { + if (right.contains(t)) { + if (result == empty) { + // delay allocation of a non-empty result set + result = new HashSet<>(); + } + result.add(t); + } + } + + // the empty set is already unmodifiable + return result == empty ? result : Collections.unmodifiableSet(result); } /** diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContent.java b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContent.java index f565df41a7c0..a519b518e4c8 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContent.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContent.java @@ -8,38 +8,80 @@ package org.elasticsearch.common.xcontent; +import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; +import java.io.IOException; import java.util.Iterator; /** - * An extension of {@link ToXContent} that can be serialized in chunks by creating an {@link Iterator}. - * This is used by the REST layer to implement flow control that does not rely on blocking the serializing thread when writing the - * serialized bytes to a non-blocking channel. + * An alternative to {@link ToXContent} allowing for progressive serialization by creating an {@link Iterator} of {@link ToXContent} chunks. + *
    + * The REST layer only serializes enough chunks at once to keep an outbound buffer full, rather than consuming all the time and memory + * needed to serialize the entire response as must be done with the regular {@link ToXContent} responses. */ public interface ChunkedToXContent { /** - * Create an iterator of {@link ToXContent} chunks, that must be serialized individually with the same {@link XContentBuilder} and - * {@link ToXContent.Params} for each call until it is fully drained. + * Create an iterator of {@link ToXContent} chunks for a REST response. Each chunk is serialized with the same {@link XContentBuilder} + * and {@link ToXContent.Params}, which is also the same as the {@link ToXContent.Params} passed as the {@code params} argument. For + * best results, all chunks should be {@code O(1)} size. See also {@link ChunkedToXContentHelper} for some handy utilities. + *
    + * Note that chunked response bodies cannot send deprecation warning headers once transmission has started, so implementations must + * check for deprecated feature use before returning. + * * @return iterator over chunks of {@link ToXContent} */ - Iterator toXContentChunked(); + Iterator toXContentChunked(ToXContent.Params params); /** - * Wraps the given instance in a {@link ToXContentObject} that will fully serialize the instance when serialized. + * Create an iterator of {@link ToXContent} chunks for a response to the {@link RestApiVersion#V_7} API. Each chunk is serialized with + * the same {@link XContentBuilder} and {@link ToXContent.Params}, which is also the same as the {@link ToXContent.Params} passed as the + * {@code params} argument. For best results, all chunks should be {@code O(1)} size. See also {@link ChunkedToXContentHelper} for some + * handy utilities. + *
    + * Similar to {@link #toXContentChunked} but for the {@link RestApiVersion#V_7} API. By default this method delegates to {@link + * #toXContentChunked}. + *
    + * Note that chunked response bodies cannot send deprecation warning headers once transmission has started, so implementations must + * check for deprecated feature use before returning. + * + * @return iterator over chunks of {@link ToXContent} + */ + default Iterator toXContentChunkedV7(ToXContent.Params params) { + return toXContentChunked(params); + } + + /** + * Wraps the given instance in a {@link ToXContent} that will fully serialize the instance when serialized. + * * @param chunkedToXContent instance to wrap - * @return x-content object + * @return x-content instance */ - static ToXContentObject wrapAsXContentObject(ChunkedToXContent chunkedToXContent) { - return (builder, params) -> { - Iterator serialization = chunkedToXContent.toXContentChunked(); - while (serialization.hasNext()) { - serialization.next().toXContent(builder, params); + static ToXContent wrapAsToXContent(ChunkedToXContent chunkedToXContent) { + return new ToXContent() { + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + Iterator serialization = chunkedToXContent.toXContentChunked(params); + while (serialization.hasNext()) { + serialization.next().toXContent(builder, params); + } + return builder; + } + + @Override + public boolean isFragment() { + return chunkedToXContent.isFragment(); } - return builder; }; } + + /** + * @return true iff this instance serializes as a fragment. See {@link ToXContentObject} for additional details. + */ + default boolean isFragment() { + return true; + } } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java new file mode 100644 index 000000000000..6a51d708b57a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.common.xcontent; + +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.xcontent.ToXContent; + +import java.util.Iterator; +import java.util.Map; +import java.util.function.Function; + +public enum ChunkedToXContentHelper { + ; + + public static Iterator startObject() { + return Iterators.single(((builder, params) -> builder.startObject())); + } + + public static Iterator startObject(String name) { + return Iterators.single(((builder, params) -> builder.startObject(name))); + } + + public static Iterator endObject() { + return Iterators.single(((builder, params) -> builder.endObject())); + } + + public static Iterator startArray(String name) { + return Iterators.single(((builder, params) -> builder.startArray(name))); + } + + public static Iterator endArray() { + return Iterators.single(((builder, params) -> builder.endArray())); + } + + public static Iterator map(String name, Map map) { + return map(name, map, entry -> (ToXContent) (builder, params) -> builder.field(entry.getKey(), entry.getValue())); + } + + public static Iterator xContentFragmentValuesMap(String name, Map map) { + return map( + name, + map, + entry -> (ToXContent) (builder, params) -> entry.getValue().toXContent(builder.startObject(entry.getKey()), params).endObject() + ); + } + + public static Iterator xContentValuesMap(String name, Map map) { + return map( + name, + map, + entry -> (ToXContent) (builder, params) -> entry.getValue().toXContent(builder.field(entry.getKey()), params) + ); + } + + public static Iterator field(String name, boolean value) { + return Iterators.single(((builder, params) -> builder.field(name, value))); + } + + public static Iterator array(String name, Iterator contents) { + return Iterators.concat(ChunkedToXContentHelper.startArray(name), contents, ChunkedToXContentHelper.endArray()); + } + + public static Iterator wrapWithObject(String name, Iterator iterator) { + return Iterators.concat(startObject(name), iterator, endObject()); + } + + private static Iterator map(String name, Map map, Function, ToXContent> toXContent) { + return wrapWithObject(name, map.entrySet().stream().map(toXContent).iterator()); + } + + public static Iterator singleChunk(ToXContent... contents) { + return Iterators.single((builder, params) -> { + for (ToXContent content : contents) { + content.toXContent(builder, params); + } + return builder; + }); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentObject.java b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentObject.java new file mode 100644 index 000000000000..b0fad066cd82 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentObject.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.common.xcontent; + +/** + * Chunked equivalent of {@link org.elasticsearch.xcontent.ToXContentObject} that serializes as a full object. 
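Pulling the chunked-serialization pieces above together, a sketch of a response type built on the new helper; the ExampleResponse record and its single field are illustrative only. The iterator yields the opening brace, an object named "values" emitted one map entry per chunk, and the closing brace, keeping every chunk O(1) in size.

    record ExampleResponse(Map<String, String> values) implements ChunkedToXContentObject {
        @Override
        public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
            return Iterators.concat(
                ChunkedToXContentHelper.startObject(),
                ChunkedToXContentHelper.map("values", values),
                ChunkedToXContentHelper.endObject()
            );
        }
    }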
+ */ +public interface ChunkedToXContentObject extends ChunkedToXContent { + + @Override + default boolean isFragment() { + return false; + } +} diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java index a71f4fe70f32..fda5055e5585 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java @@ -10,7 +10,7 @@ import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -594,7 +594,7 @@ public static BytesReference childBytes(XContentParser parser) throws IOExceptio * @param xContentType an instance to serialize */ public static void writeTo(StreamOutput out, XContentType xContentType) throws IOException { - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { // when sending an enumeration to () { + transportService.handshake(connection, probeHandshakeTimeout, ActionListener.notifyOnce(new ActionListener<>() { @Override - protected void innerOnResponse(DiscoveryNode remoteNode) { + public void onResponse(DiscoveryNode remoteNode) { try { // success means (amongst other things) that the cluster names match logger.trace("[{}] handshake successful: {}", transportAddress, remoteNode); @@ -166,7 +165,7 @@ public void onFailure(Exception e) { } @Override - protected void innerOnFailure(Exception e) { + public void onFailure(Exception e) { // we opened a connection and successfully performed a low-level handshake, so we were definitely // talking to an Elasticsearch node, but the high-level handshake failed indicating some kind of // mismatched configurations (e.g. 
cluster name) that the user should address @@ -175,7 +174,7 @@ protected void innerOnFailure(Exception e) { listener.onFailure(e); } - }); + })); }) ); diff --git a/server/src/main/java/org/elasticsearch/env/Environment.java b/server/src/main/java/org/elasticsearch/env/Environment.java index 90d03fe1ce94..9db7f831a186 100644 --- a/server/src/main/java/org/elasticsearch/env/Environment.java +++ b/server/src/main/java/org/elasticsearch/env/Environment.java @@ -9,9 +9,11 @@ package org.elasticsearch.env; import org.apache.lucene.util.Constants; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.StatelessSecureSettings; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.SuppressForbidden; @@ -156,7 +158,12 @@ public Environment(final Settings settings, final Path configPath) { assert sharedDataFile != null; finalSettings.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), sharedDataFile.toString()); } - this.settings = finalSettings.build(); + + if (DiscoveryNode.isStateless(settings)) { + this.settings = StatelessSecureSettings.install(finalSettings.build()); + } else { + this.settings = finalSettings.build(); + } } /** diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index e4c42af85af4..c21f2c172795 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -8,6 +8,7 @@ package org.elasticsearch.env; +import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.util.Strings; @@ -25,8 +26,10 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.FileSystemUtils; +import org.elasticsearch.common.logging.ChunkedLoggingStream; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -49,12 +52,15 @@ import org.elasticsearch.index.store.FsDirectoryFactory; import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.monitor.fs.FsProbe; +import org.elasticsearch.monitor.jvm.HotThreads; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.xcontent.NamedXContentRegistry; import java.io.Closeable; import java.io.IOException; +import java.io.OutputStreamWriter; import java.io.UncheckedIOException; +import java.nio.charset.StandardCharsets; import java.nio.file.AtomicMoveNotSupportedException; import java.nio.file.DirectoryStream; import java.nio.file.FileStore; @@ -926,6 +932,38 @@ public Set lockedShards() { } } + // throttle the hot-threads calls: no more than one per minute + private final Semaphore shardLockHotThreadsPermit = new Semaphore(1); + private long nextShardLockHotThreadsNanos = Long.MIN_VALUE; + + private void maybeLogThreadDump(ShardId shardId, String message) { + if (logger.isDebugEnabled() == false) { + return; + } + + final var prefix = format("hot threads while failing to obtain shard lock for %s: %s", shardId, message); + if 
(shardLockHotThreadsPermit.tryAcquire()) { + try { + final var now = System.nanoTime(); + if (now <= nextShardLockHotThreadsNanos) { + return; + } + nextShardLockHotThreadsNanos = now + TimeUnit.SECONDS.toNanos(60); + final var hotThreads = new HotThreads().busiestThreads(500).ignoreIdleThreads(false).detect(); + try ( + var stream = ChunkedLoggingStream.create(logger, Level.DEBUG, prefix, ReferenceDocs.SHARD_LOCK_TROUBLESHOOTING); + var writer = new OutputStreamWriter(stream, StandardCharsets.UTF_8) + ) { + writer.write(hotThreads); + } + } catch (Exception e) { + logger.error(format("could not obtain %s", prefix), e); + } finally { + shardLockHotThreadsPermit.release(); + } + } + } + private final class InternalShardLock { /* * This class holds a mutex for exclusive access and timeout / wait semantics @@ -975,18 +1013,15 @@ void acquire(long timeoutInMillis, final String details) throws ShardLockObtainF setDetails(details); } else { final Tuple lockDetails = this.lockDetails; // single volatile read - throw new ShardLockObtainFailedException( - shardId, - "obtaining shard lock for [" - + details - + "] timed out after [" - + timeoutInMillis - + "ms], lock already held for [" - + lockDetails.v2() - + "] with age [" - + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - lockDetails.v1()) - + "ms]" + final var message = format( + "obtaining shard lock for [%s] timed out after [%dms], lock already held for [%s] with age [%dms]", + details, + timeoutInMillis, + lockDetails.v2(), + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - lockDetails.v1()) ); + maybeLogThreadDump(shardId, message); + throw new ShardLockObtainFailedException(shardId, message); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); diff --git a/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java b/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java index 56f913984dfe..1249074bfabc 100644 --- a/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java +++ b/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java @@ -49,36 +49,21 @@ */ public abstract class AsyncShardFetch implements Releasable { - /** - * An action that lists the relevant shard data that needs to be fetched. 
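The shard-lock logging added in the NodeEnvironment hunk above follows a throttling pattern worth spelling out: a non-blocking Semaphore permit guards the shared state, and a stored deadline keeps the expensive hot-threads capture to at most one per minute. Reduced to its core, with placeholder names:

    if (permit.tryAcquire()) {                   // never block the thread that failed to get the lock
        try {
            final long now = System.nanoTime();
            if (now > nextAllowedNanos) {
                nextAllowedNanos = now + TimeUnit.SECONDS.toNanos(60);
                captureExpensiveDiagnostics();   // e.g. the HotThreads dump above
            }
        } finally {
            permit.release();
        }
    }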
- */ - public interface Lister, NodeResponse extends BaseNodeResponse> { - void list(ShardId shardId, @Nullable String customDataPath, DiscoveryNode[] nodes, ActionListener listener); - } - protected final Logger logger; protected final String type; protected final ShardId shardId; protected final String customDataPath; - private final Lister, T> action; private final Map> cache = new HashMap<>(); private final Set nodesToIgnore = new HashSet<>(); private final AtomicLong round = new AtomicLong(); private boolean closed; @SuppressWarnings("unchecked") - protected AsyncShardFetch( - Logger logger, - String type, - ShardId shardId, - String customDataPath, - Lister, T> action - ) { + protected AsyncShardFetch(Logger logger, String type, ShardId shardId, String customDataPath) { this.logger = logger; this.type = type; this.shardId = Objects.requireNonNull(shardId); this.customDataPath = Objects.requireNonNull(customDataPath); - this.action = (Lister, T>) action; } @Override @@ -307,7 +292,7 @@ private boolean hasAnyNodeFetching(Map> shardCache) { // visible for testing void asyncFetch(final DiscoveryNode[] nodes, long fetchingRound) { logger.trace("{} fetching [{}] from {}", shardId, type, nodes); - action.list(shardId, customDataPath, nodes, new ActionListener>() { + list(shardId, customDataPath, nodes, new ActionListener>() { @Override public void onResponse(BaseNodesResponse response) { assert assertSameNodes(response); @@ -336,6 +321,13 @@ private boolean assertSameNodes(BaseNodesResponse response) { }); } + protected abstract void list( + ShardId shardId, + @Nullable String customDataPath, + DiscoveryNode[] nodes, + ActionListener> listener + ); + /** * The result of a fetch operation. Make sure to first check {@link #hasData()} before * fetching the actual data. 
diff --git a/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java b/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java index 914cd156643b..6c7c751491c3 100644 --- a/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java +++ b/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRoutingRoleStrategy; import org.elasticsearch.common.settings.ClusterSettings; import java.util.Map; @@ -82,9 +83,9 @@ static ClusterState recoverClusterBlocks(final ClusterState state) { return ClusterState.builder(state).blocks(blocks).build(); } - static ClusterState updateRoutingTable(final ClusterState state) { + static ClusterState updateRoutingTable(final ClusterState state, ShardRoutingRoleStrategy shardRoutingRoleStrategy) { // initialize all index routing tables as empty - final RoutingTable.Builder routingTableBuilder = RoutingTable.builder(state.routingTable()); + final RoutingTable.Builder routingTableBuilder = RoutingTable.builder(shardRoutingRoleStrategy, state.routingTable()); for (final IndexMetadata indexMetadata : state.metadata().indices().values()) { routingTableBuilder.addAsRecovery(indexMetadata); } diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index fdd2f6b0052b..a8dbda1b137d 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.core.Releasables; -import org.elasticsearch.gateway.AsyncShardFetch.Lister; import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.store.TransportNodesListShardStoreMetadata; @@ -212,16 +211,10 @@ private boolean hasNewNodes(DiscoveryNodes nodes) { return false; } - class InternalAsyncFetch extends AsyncShardFetch { + abstract class InternalAsyncFetch extends AsyncShardFetch { - InternalAsyncFetch( - Logger logger, - String type, - ShardId shardId, - String customDataPath, - Lister, T> action - ) { - super(logger, type, shardId, customDataPath, action); + InternalAsyncFetch(Logger logger, String type, ShardId shardId, String customDataPath) { + super(logger, type, shardId, customDataPath); } @Override @@ -250,16 +243,28 @@ class InternalPrimaryShardAllocator extends PrimaryShardAllocator { @Override protected AsyncShardFetch.FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation) { // explicitly type lister, some IDEs (Eclipse) are not able to correctly infer the function type - Lister, NodeGatewayStartedShards> lister = this::listStartedShards; AsyncShardFetch fetch = asyncFetchStarted.computeIfAbsent( shard.shardId(), shardId -> new InternalAsyncFetch<>( logger, "shard_started", shardId, - IndexMetadata.INDEX_DATA_PATH_SETTING.get(allocation.metadata().index(shard.index()).getSettings()), - lister - ) + IndexMetadata.INDEX_DATA_PATH_SETTING.get(allocation.metadata().index(shard.index()).getSettings()) + ) { + @Override + protected void list( + ShardId shardId, + 
String customDataPath, + DiscoveryNode[] nodes, + ActionListener> listener + ) { + client.executeLocally( + TransportNodesListGatewayStartedShards.TYPE, + new TransportNodesListGatewayStartedShards.Request(shardId, customDataPath, nodes), + ActionListener.wrap(listener) + ); + } + } ); AsyncShardFetch.FetchResult shardState = fetch.fetchData( allocation.nodes(), @@ -272,19 +277,6 @@ protected AsyncShardFetch.FetchResult fetchData(ShardR return shardState; } - private void listStartedShards( - ShardId shardId, - String customDataPath, - DiscoveryNode[] nodes, - ActionListener> listener - ) { - var request = new TransportNodesListGatewayStartedShards.Request(shardId, customDataPath, nodes); - client.executeLocally( - TransportNodesListGatewayStartedShards.TYPE, - request, - ActionListener.wrap(listener::onResponse, listener::onFailure) - ); - } } class InternalReplicaShardAllocator extends ReplicaShardAllocator { @@ -297,17 +289,28 @@ class InternalReplicaShardAllocator extends ReplicaShardAllocator { @Override protected AsyncShardFetch.FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation) { - // explicitly type lister, some IDEs (Eclipse) are not able to correctly infer the function type - Lister, NodeStoreFilesMetadata> lister = this::listStoreFilesMetadata; AsyncShardFetch fetch = asyncFetchStore.computeIfAbsent( shard.shardId(), shardId -> new InternalAsyncFetch<>( logger, "shard_store", shard.shardId(), - IndexMetadata.INDEX_DATA_PATH_SETTING.get(allocation.metadata().index(shard.index()).getSettings()), - lister - ) + IndexMetadata.INDEX_DATA_PATH_SETTING.get(allocation.metadata().index(shard.index()).getSettings()) + ) { + @Override + protected void list( + ShardId shardId, + String customDataPath, + DiscoveryNode[] nodes, + ActionListener> listener + ) { + client.executeLocally( + TransportNodesListShardStoreMetadata.TYPE, + new TransportNodesListShardStoreMetadata.Request(shardId, customDataPath, nodes), + ActionListener.wrap(listener) + ); + } + } ); AsyncShardFetch.FetchResult shardStores = fetch.fetchData( allocation.nodes(), @@ -319,20 +322,6 @@ protected AsyncShardFetch.FetchResult fetchData(ShardRou return shardStores; } - private void listStoreFilesMetadata( - ShardId shardId, - String customDataPath, - DiscoveryNode[] nodes, - ActionListener> listener - ) { - var request = new TransportNodesListShardStoreMetadata.Request(shardId, customDataPath, nodes); - client.executeLocally( - TransportNodesListShardStoreMetadata.TYPE, - request, - ActionListener.wrap(listener::onResponse, listener::onFailure) - ); - } - @Override protected boolean hasInitiatedFetching(ShardRouting shard) { return asyncFetchStore.get(shard.shardId()) != null; diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java index 3162a3a4ddf0..10bcc2cae9b6 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RerouteService; +import org.elasticsearch.cluster.routing.ShardRoutingRoleStrategy; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.Priority; @@ -70,6 +71,7 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste 
static final TimeValue DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET = TimeValue.timeValueMinutes(5); + private final ShardRoutingRoleStrategy shardRoutingRoleStrategy; private final ThreadPool threadPool; private final RerouteService rerouteService; @@ -87,10 +89,12 @@ public GatewayService( final Settings settings, final RerouteService rerouteService, final ClusterService clusterService, + final ShardRoutingRoleStrategy shardRoutingRoleStrategy, final ThreadPool threadPool ) { this.rerouteService = rerouteService; this.clusterService = clusterService; + this.shardRoutingRoleStrategy = shardRoutingRoleStrategy; this.threadPool = threadPool; this.expectedDataNodes = EXPECTED_DATA_NODES_SETTING.get(settings); @@ -214,7 +218,9 @@ public ClusterState execute(final ClusterState currentState) { logger.debug("cluster is already recovered"); return currentState; } - return ClusterStateUpdaters.removeStateNotRecoveredBlock(ClusterStateUpdaters.updateRoutingTable(currentState)); + return ClusterStateUpdaters.removeStateNotRecoveredBlock( + ClusterStateUpdaters.updateRoutingTable(currentState, shardRoutingRoleStrategy) + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index 509d29f885b7..cf4e903ef652 100644 --- a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; @@ -118,7 +119,10 @@ public ClusterState execute(ClusterState currentState) { } Metadata.Builder metadata = Metadata.builder(currentState.metadata()); ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable()); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder( + allocationService.getShardRoutingRoleStrategy(), + currentState.routingTable() + ); final Version minIndexCompatibilityVersion = currentState.getNodes() .getMaxNodeVersion() .minimumIndexCompatibilityVersion(); @@ -269,7 +273,7 @@ public void writeTo(StreamOutput out) throws IOException { public static class AllocateDangledResponse extends TransportResponse { private AllocateDangledResponse(StreamInput in) throws IOException { - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { in.readBoolean(); } } @@ -278,7 +282,7 @@ private AllocateDangledResponse() {} @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeBoolean(true); } } diff --git a/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java b/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java index 752ef4345229..28e0a7a1db16 100644 --- a/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java @@ -60,6 +60,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import 
org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedFunction; @@ -71,7 +72,6 @@ import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; @@ -1082,7 +1082,7 @@ private void addIndexMetadataDocuments(IndexMetadata indexMetadata) throws IOExc private void addGlobalMetadataDocuments(Metadata metadata) throws IOException { logger.trace("updating global metadata doc"); - writePages(metadata, (bytesRef, pageIndex, isLastPage) -> { + writePages(ChunkedToXContent.wrapAsToXContent(metadata), (bytesRef, pageIndex, isLastPage) -> { final Document document = new Document(); document.add(new StringField(TYPE_FIELD_NAME, GLOBAL_TYPE_NAME, Field.Store.NO)); document.add(new StoredField(PAGE_FIELD_NAME, pageIndex)); @@ -1094,7 +1094,7 @@ private void addGlobalMetadataDocuments(Metadata metadata) throws IOException { }); } - private void writePages(ToXContentFragment metadata, PageWriter pageWriter) throws IOException { + private void writePages(ToXContent metadata, PageWriter pageWriter) throws IOException { try ( PageWriterOutputStream paginatedStream = new PageWriterOutputStream(documentBuffer, pageWriter); OutputStream compressedStream = CompressorFactory.COMPRESSOR.threadLocalOutputStream(paginatedStream); diff --git a/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java index 393e4fd66c9d..5246d875692c 100644 --- a/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java @@ -327,7 +327,7 @@ public static Tuple> canBeAllocatedT * Takes the store info for nodes that have a shard store and adds them to the node decisions, * leaving the node explanations untouched for those nodes that do not have any store information. 
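Several hunks in this change (XContentHelper and LocalAllocateDangledIndices above, TransportNodesListGatewayStartedShards below) swap node Version checks for TransportVersion when gating the wire format. The recurring shape, sketched for a field that only newer streams carry:

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        shardId.writeTo(out);
        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) {
            out.writeString(customDataPath);     // only peers on a new enough transport version read this field
        }
    }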
*/ - private static List augmentExplanationsWithStoreInfo( + public static List augmentExplanationsWithStoreInfo( Map nodeDecisions, Map withShardStores ) { diff --git a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java index 0a40899762ed..8fc67c3a3195 100644 --- a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java @@ -8,8 +8,10 @@ package org.elasticsearch.gateway; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; @@ -56,6 +58,8 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction TransportNodesListGatewayStartedShards.NodeRequest, TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> { + private static final Logger logger = LogManager.getLogger(TransportNodesListGatewayStartedShards.class); + public static final String ACTION_NAME = "internal:gateway/local/started_shards"; public static final ActionType TYPE = new ActionType<>(ACTION_NAME, NodesGatewayStartedShards::new); @@ -187,7 +191,7 @@ public static class Request extends BaseNodesRequest { public Request(StreamInput in) throws IOException { super(in); shardId = new ShardId(in); - if (in.getVersion().onOrAfter(Version.V_7_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { customDataPath = in.readString(); } else { customDataPath = null; @@ -218,7 +222,7 @@ public String getCustomDataPath() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); shardId.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_7_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { out.writeString(customDataPath); } } @@ -258,7 +262,7 @@ public static class NodeRequest extends TransportRequest { public NodeRequest(StreamInput in) throws IOException { super(in); shardId = new ShardId(in); - if (in.getVersion().onOrAfter(Version.V_7_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { customDataPath = in.readString(); } else { customDataPath = null; @@ -274,7 +278,7 @@ public NodeRequest(Request request) { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); shardId.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_7_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { assert customDataPath != null; out.writeString(customDataPath); } diff --git a/server/src/main/java/org/elasticsearch/health/Diagnosis.java b/server/src/main/java/org/elasticsearch/health/Diagnosis.java index 2f3d44b1f699..a190dd3d5df0 100644 --- a/server/src/main/java/org/elasticsearch/health/Diagnosis.java +++ b/server/src/main/java/org/elasticsearch/health/Diagnosis.java @@ -11,6 +11,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ToXContent; @@ -19,9 
+20,6 @@ import java.util.Iterator; import java.util.List; import java.util.Objects; -import java.util.Spliterator; -import java.util.Spliterators; -import java.util.stream.StreamSupport; import static org.elasticsearch.health.HealthService.HEALTH_API_ID_PREFIX; @@ -46,6 +44,7 @@ public enum Type { INDEX("indices"), NODE("nodes"), SLM_POLICY("slm_policies"), + FEATURE_STATE("feature_states"), SNAPSHOT_REPOSITORY("snapshot_repositories"); private final String displayValue; @@ -77,8 +76,8 @@ public Resource(Collection nodes) { } @Override - public Iterator toXContentChunked() { - Iterator valuesIterator; + public Iterator toXContentChunked(ToXContent.Params outerParams) { + final Iterator valuesIterator; if (nodes != null) { valuesIterator = nodes.stream().map(node -> (ToXContent) (builder, params) -> { builder.startObject(); @@ -92,11 +91,7 @@ public Iterator toXContentChunked() { } else { valuesIterator = values.stream().map(value -> (ToXContent) (builder, params) -> builder.value(value)).iterator(); } - return Iterators.concat( - Iterators.single((ToXContent) (builder, params) -> builder.startArray(type.displayValue)), - valuesIterator, - Iterators.single((builder, params) -> builder.endArray()) - ); + return ChunkedToXContentHelper.array(type.displayValue, valuesIterator); } @Override @@ -147,12 +142,12 @@ public String getUniqueId() { } @Override - public Iterator toXContentChunked() { - Iterator resourcesIterator = Collections.emptyIterator(); - if (affectedResources != null && affectedResources.size() > 0) { - resourcesIterator = affectedResources.stream() - .flatMap(s -> StreamSupport.stream(Spliterators.spliteratorUnknownSize(s.toXContentChunked(), Spliterator.ORDERED), false)) - .iterator(); + public Iterator toXContentChunked(ToXContent.Params outerParams) { + final Iterator resourcesIterator; + if (affectedResources == null) { + resourcesIterator = Collections.emptyIterator(); + } else { + resourcesIterator = Iterators.flatMap(affectedResources.iterator(), s -> s.toXContentChunked(outerParams)); } return Iterators.concat(Iterators.single((ToXContent) (builder, params) -> { builder.startObject(); diff --git a/server/src/main/java/org/elasticsearch/health/GetHealthAction.java b/server/src/main/java/org/elasticsearch/health/GetHealthAction.java index e8a8a823308e..5e60b2b6c87b 100644 --- a/server/src/main/java/org/elasticsearch/health/GetHealthAction.java +++ b/server/src/main/java/org/elasticsearch/health/GetHealthAction.java @@ -37,6 +37,8 @@ import java.util.NoSuchElementException; import java.util.Objects; +import static org.elasticsearch.action.ValidateActions.addValidationError; + public class GetHealthAction extends ActionType { public static final GetHealthAction INSTANCE = new GetHealthAction(); @@ -93,7 +95,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override @SuppressWarnings("unchecked") - public Iterator toXContentChunked() { + public Iterator toXContentChunked(ToXContent.Params outerParams) { return Iterators.concat(Iterators.single((ToXContent) (builder, params) -> { builder.startObject(); if (status != null) { @@ -111,7 +113,7 @@ public Iterator toXContentChunked() { // indicators however the affected resources which are the O(indices) fields are // flat mapped over all diagnoses within the indicator Iterators.single((ToXContent) (builder, params) -> builder.field(indicator.name())), - indicator.toXContentChunked() + indicator.toXContentChunked(outerParams) ) ) .toArray(Iterator[]::new) @@ -146,21 +148,25 @@ public String toString() { public 
static class Request extends ActionRequest { private final String indicatorName; private final boolean verbose; + private final int size; - public Request(boolean verbose) { - // We never compute details if no indicator name is given because of the runtime cost: - this.indicatorName = null; - this.verbose = verbose; + public Request(boolean verbose, int size) { + this(null, verbose, size); } - public Request(String indicatorName, boolean verbose) { + public Request(String indicatorName, boolean verbose, int size) { this.indicatorName = indicatorName; this.verbose = verbose; + this.size = size; } @Override public ActionRequestValidationException validate() { - return null; + ActionRequestValidationException validationException = null; + if (size < 0) { + validationException = addValidationError("The size parameter must be a positive integer", validationException); + } + return validationException; } @Override @@ -195,11 +201,21 @@ public TransportAction( @Override protected void doExecute(Task task, Request request, ActionListener responseListener) { assert task instanceof CancellableTask; - healthService.getHealth(client, request.indicatorName, request.verbose, responseListener.map(healthIndicatorResults -> { - Response response = new Response(clusterService.getClusterName(), healthIndicatorResults, request.indicatorName == null); - healthApiStats.track(request.verbose, response); - return response; - })); + healthService.getHealth( + client, + request.indicatorName, + request.verbose, + request.size, + responseListener.map(healthIndicatorResults -> { + Response response = new Response( + clusterService.getClusterName(), + healthIndicatorResults, + request.indicatorName == null + ); + healthApiStats.track(request.verbose, response); + return response; + }) + ); } } } diff --git a/server/src/main/java/org/elasticsearch/health/HealthIndicatorResult.java b/server/src/main/java/org/elasticsearch/health/HealthIndicatorResult.java index 46ecabca1590..6e8e884a41d5 100644 --- a/server/src/main/java/org/elasticsearch/health/HealthIndicatorResult.java +++ b/server/src/main/java/org/elasticsearch/health/HealthIndicatorResult.java @@ -9,15 +9,12 @@ package org.elasticsearch.health; import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.xcontent.ToXContent; import java.util.Collections; import java.util.Iterator; import java.util.List; -import java.util.Spliterator; -import java.util.Spliterators; -import java.util.stream.StreamSupport; public record HealthIndicatorResult( String name, @@ -26,14 +23,14 @@ public record HealthIndicatorResult( HealthIndicatorDetails details, List impacts, List diagnosisList -) implements ChunkedToXContent { +) implements ChunkedToXContentObject { @Override - public Iterator toXContentChunked() { - Iterator diagnosisIterator = Collections.emptyIterator(); - if (diagnosisList != null && diagnosisList.isEmpty() == false) { - diagnosisIterator = diagnosisList.stream() - .flatMap(s -> StreamSupport.stream(Spliterators.spliteratorUnknownSize(s.toXContentChunked(), Spliterator.ORDERED), false)) - .iterator(); + public Iterator toXContentChunked(ToXContent.Params outerParams) { + final Iterator diagnosisIterator; + if (diagnosisList == null) { + diagnosisIterator = Collections.emptyIterator(); + } else { + diagnosisIterator = Iterators.flatMap(diagnosisList.iterator(), s -> s.toXContentChunked(outerParams)); } return 
Iterators.concat(Iterators.single((ToXContent) (builder, params) -> { builder.startObject(); diff --git a/server/src/main/java/org/elasticsearch/health/HealthIndicatorService.java b/server/src/main/java/org/elasticsearch/health/HealthIndicatorService.java index 1c0972f8cb35..867df43aece3 100644 --- a/server/src/main/java/org/elasticsearch/health/HealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/health/HealthIndicatorService.java @@ -17,12 +17,21 @@ /** * This is a service interface used to calculate health indicators from the different modules or plugins. + + * NOTE: if you are adding the name of an indicator or the id of a diagnosis you need to update the configuration + * of the health-api-indexer in the telemetry repository so the new/changed fields will be indexed properly. */ public interface HealthIndicatorService { + int MAX_AFFECTED_RESOURCES_COUNT = 1000; + String name(); - HealthIndicatorResult calculate(boolean verbose, HealthInfo healthInfo); + default HealthIndicatorResult calculate(boolean verbose, HealthInfo healthInfo) { + return calculate(verbose, MAX_AFFECTED_RESOURCES_COUNT, healthInfo); + } + + HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo); /** * This method creates a HealthIndicatorResult with the given information. Note that it sorts the impacts by severity (the lower the diff --git a/server/src/main/java/org/elasticsearch/health/HealthService.java b/server/src/main/java/org/elasticsearch/health/HealthService.java index c3079c6a7170..7828f12e7aba 100644 --- a/server/src/main/java/org/elasticsearch/health/HealthService.java +++ b/server/src/main/java/org/elasticsearch/health/HealthService.java @@ -76,20 +76,25 @@ public HealthService( * * @param client A client to be used to fetch the health data from the health node * @param indicatorName If not null, the returned results will only have this indicator - * @param explain Whether to compute the details portion of the results + * @param verbose Whether to compute the details portion of the results * @param listener A listener to be notified of the list of all HealthIndicatorResult if indicatorName is null, or one * HealthIndicatorResult if indicatorName is not null + * @param maxAffectedResourcesCount The maximum number of affected resources to return per type.
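(A rough sketch of the new indicator contract introduced in the HealthIndicatorService hunk above: indicators now receive an upper bound on how many affected resources they may report, with MAX_AFFECTED_RESOURCES_COUNT = 1000 as the default. The class name, the sample index names, the import paths not shown in the diff, and the createIndicator/HealthIndicatorDetails.EMPTY usages are illustrative assumptions rather than part of this change; the CollectionUtils.limitSize call mirrors how DiskHealthIndicatorService truncates its resources later in this diff.)

import org.elasticsearch.health.Diagnosis;
import org.elasticsearch.health.HealthIndicatorDetails;
import org.elasticsearch.health.HealthIndicatorResult;
import org.elasticsearch.health.HealthIndicatorService;
import org.elasticsearch.health.HealthStatus;
import org.elasticsearch.health.node.HealthInfo;

import java.util.List;

import static org.elasticsearch.common.util.CollectionUtils.limitSize;

public class ExampleHealthIndicatorService implements HealthIndicatorService {

    @Override
    public String name() {
        return "example";
    }

    @Override
    public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo) {
        // Stand-in for whatever resources a real indicator would derive from healthInfo / cluster state.
        List<String> affectedIndices = List.of("logs-000001", "logs-000002", "metrics-000001");
        // Honour the new cap: never report more affected resources per type than the caller asked for.
        Diagnosis.Resource indices = new Diagnosis.Resource(
            Diagnosis.Resource.Type.INDEX,
            limitSize(affectedIndices, maxAffectedResourcesCount)
        );
        Diagnosis diagnosis = new Diagnosis(
            new Diagnosis.Definition(name(), "example_diagnosis", "Example cause.", "Example action.", "https://example.invalid"),
            List.of(indices)
        );
        return createIndicator(HealthStatus.YELLOW, "example symptom", HealthIndicatorDetails.EMPTY, List.of(), List.of(diagnosis));
    }
}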
* @throws ResourceNotFoundException if an indicator name is given and the indicator is not found */ public void getHealth( Client client, @Nullable String indicatorName, - boolean explain, + boolean verbose, + int maxAffectedResourcesCount, ActionListener> listener ) { + if (maxAffectedResourcesCount < 0) { + throw new IllegalArgumentException("The max number of resources must be a positive integer"); + } // Determine if cluster is stable enough to calculate health before running other indicators List preflightResults = preflightHealthIndicatorServices.stream() - .map(service -> service.calculate(explain, HealthInfo.EMPTY_HEALTH_INFO)) + .map(service -> service.calculate(verbose, maxAffectedResourcesCount, HealthInfo.EMPTY_HEALTH_INFO)) .toList(); // If any of these are not GREEN, then we cannot obtain health from other indicators @@ -113,7 +118,7 @@ public void onResponse(FetchHealthInfoCacheAction.Response response) { ActionRunnable> calculateFilteredIndicatorsRunnable = calculateFilteredIndicatorsRunnable( indicatorName, healthInfo, - explain, + verbose, listener ); @@ -131,7 +136,7 @@ public void onFailure(Exception e) { ActionRunnable> calculateFilteredIndicatorsRunnable = calculateFilteredIndicatorsRunnable( indicatorName, HealthInfo.EMPTY_HEALTH_INFO, - explain, + verbose, listener ); try { @@ -150,7 +155,7 @@ private ActionRunnable> calculateFilteredIndicatorsR return ActionRunnable.wrap(listener, l -> { List results = Stream.concat( filteredPreflightResults, - filteredIndicators.map(service -> service.calculate(explain, healthInfo)) + filteredIndicators.map(service -> service.calculate(explain, maxAffectedResourcesCount, healthInfo)) ).toList(); validateResultsAndNotifyListener(indicatorName, results, l); @@ -160,7 +165,7 @@ private ActionRunnable> calculateFilteredIndicatorsR } else { // Mark remaining indicators as UNKNOWN - HealthIndicatorDetails unknownDetails = healthUnknownReason(preflightResults, explain); + HealthIndicatorDetails unknownDetails = healthUnknownReason(preflightResults, verbose); Stream filteredIndicatorResults = filteredIndicators.map( service -> generateUnknownResult(service, UNKNOWN_RESULT_SUMMARY_PREFLIGHT_FAILED, unknownDetails) ); diff --git a/server/src/main/java/org/elasticsearch/health/RestGetHealthAction.java b/server/src/main/java/org/elasticsearch/health/RestGetHealthAction.java index b1ab2e6dea74..c7068828de6a 100644 --- a/server/src/main/java/org/elasticsearch/health/RestGetHealthAction.java +++ b/server/src/main/java/org/elasticsearch/health/RestGetHealthAction.java @@ -23,6 +23,8 @@ public class RestGetHealthAction extends BaseRestHandler { private static final String VERBOSE_PARAM = "verbose"; + private static final String SIZE_PARAM = "size"; + @Override public String getName() { // TODO: Existing - "cluster_health_action", "cat_health_action" @@ -31,14 +33,15 @@ public String getName() { @Override public List routes() { - return List.of(new Route(GET, "/_internal/_health"), new Route(GET, "/_internal/_health/{indicator}")); + return List.of(new Route(GET, "/_health_report"), new Route(GET, "/_health_report/{indicator}")); } @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { String indicatorName = request.param("indicator"); boolean verbose = request.paramAsBoolean(VERBOSE_PARAM, true); - GetHealthAction.Request getHealthRequest = new GetHealthAction.Request(indicatorName, verbose); + int size = request.paramAsInt(SIZE_PARAM, 1000); + GetHealthAction.Request getHealthRequest = 
new GetHealthAction.Request(indicatorName, verbose, size); return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).execute( GetHealthAction.INSTANCE, getHealthRequest, diff --git a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java index 4c64e6f404a5..859a4fc2a8c1 100644 --- a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java +++ b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java @@ -8,20 +8,24 @@ package org.elasticsearch.health.metadata; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.AbstractNamedDiffable; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NamedDiff; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.RelativeByteSizeValue; import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Iterator; import java.util.Objects; /** @@ -49,8 +53,8 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_8_5_0; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.V_8_5_0; } @Override @@ -63,22 +67,19 @@ public static NamedDiff readDiffFrom(StreamInput in) throws } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(DISK_METADATA.getPreferredName()); - diskMetadata.toXContent(builder, params); - builder.endObject(); - return builder; + public Iterator toXContentChunked(ToXContent.Params ignored) { + return Iterators.single((builder, params) -> { + builder.startObject(DISK_METADATA.getPreferredName()); + diskMetadata.toXContent(builder, params); + builder.endObject(); + return builder; + }); } public static HealthMetadata getFromClusterState(ClusterState clusterState) { return clusterState.custom(HealthMetadata.TYPE); } - @Override - public boolean isFragment() { - return true; - } - public Disk getDiskMetadata() { return diskMetadata; } @@ -96,6 +97,11 @@ public int hashCode() { return Objects.hash(diskMetadata); } + @Override + public String toString() { + return "HealthMetadata{diskMetadata=" + Strings.toString(diskMetadata) + '}'; + } + /** * Contains the thresholds necessary to determine the health of the disk space of a node. The thresholds are determined by the elected * master. 
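(Tying the request changes above together: the health report is now served under /_health_report, the REST handler reads an optional size parameter defaulting to 1000, and the value lands in the reworked GetHealthAction.Request, whose validate() rejects negative sizes. The snippet below is illustrative only; the variable names are not from this diff.)

// GET /_health_report/disk?size=50 becomes:
GetHealthAction.Request scoped = new GetHealthAction.Request("disk", true, 50);
assert scoped.validate() == null;   // non-negative sizes pass validation

// while a negative size fails request validation before any indicator is computed:
GetHealthAction.Request invalid = new GetHealthAction.Request("disk", true, -1);
assert invalid.validate() != null;  // "The size parameter must be a positive integer"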
@@ -110,7 +116,7 @@ public record Disk( ) implements ToXContentFragment, Writeable { public static final String TYPE = "disk"; - public static Version VERSION_SUPPORTING_HEADROOM_FIELDS = Version.V_8_5_0; + public static final TransportVersion VERSION_SUPPORTING_HEADROOM_FIELDS = TransportVersion.V_8_5_0; private static final ParseField HIGH_WATERMARK_FIELD = new ParseField("high_watermark"); private static final ParseField HIGH_MAX_HEADROOM_FIELD = new ParseField("high_max_headroom"); @@ -133,10 +139,10 @@ static Disk readFrom(StreamInput in) throws IOException { FROZEN_FLOOD_STAGE_WATERMARK_FIELD.getPreferredName() ); ByteSizeValue frozenFloodStageMaxHeadroom = ByteSizeValue.readFrom(in); - ByteSizeValue highMaxHeadroom = in.getVersion().onOrAfter(VERSION_SUPPORTING_HEADROOM_FIELDS) + ByteSizeValue highMaxHeadroom = in.getTransportVersion().onOrAfter(VERSION_SUPPORTING_HEADROOM_FIELDS) ? ByteSizeValue.readFrom(in) : ByteSizeValue.MINUS_ONE; - ByteSizeValue floodStageMaxHeadroom = in.getVersion().onOrAfter(VERSION_SUPPORTING_HEADROOM_FIELDS) + ByteSizeValue floodStageMaxHeadroom = in.getTransportVersion().onOrAfter(VERSION_SUPPORTING_HEADROOM_FIELDS) ? ByteSizeValue.readFrom(in) : ByteSizeValue.MINUS_ONE; return new Disk( @@ -155,7 +161,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(describeFloodStageWatermark()); out.writeString(describeFrozenFloodStageWatermark()); frozenFloodStageMaxHeadroom.writeTo(out); - if (out.getVersion().onOrAfter(VERSION_SUPPORTING_HEADROOM_FIELDS)) { + if (out.getTransportVersion().onOrAfter(VERSION_SUPPORTING_HEADROOM_FIELDS)) { highMaxHeadroom.writeTo(out); floodStageMaxHeadroom.writeTo(out); } @@ -185,11 +191,11 @@ private ByteSizeValue getFreeBytes(ByteSizeValue total, RelativeByteSizeValue wa } public ByteSizeValue getFreeBytesHighWatermark(ByteSizeValue total) { - return getFreeBytes(total, highWatermark, ByteSizeValue.MINUS_ONE); + return getFreeBytes(total, highWatermark, highMaxHeadroom); } public ByteSizeValue getFreeBytesFloodStageWatermark(ByteSizeValue total) { - return getFreeBytes(total, floodStageWatermark, ByteSizeValue.MINUS_ONE); + return getFreeBytes(total, floodStageWatermark, floodStageMaxHeadroom); } public ByteSizeValue getFreeBytesFrozenFloodStageWatermark(ByteSizeValue total) { diff --git a/server/src/main/java/org/elasticsearch/health/node/DiskHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/health/node/DiskHealthIndicatorService.java index 0c96c6807a65..dafd5829ff4b 100644 --- a/server/src/main/java/org/elasticsearch/health/node/DiskHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/health/node/DiskHealthIndicatorService.java @@ -41,6 +41,7 @@ import java.util.stream.Stream; import static org.elasticsearch.cluster.node.DiscoveryNode.DISCOVERY_NODE_COMPARATOR; +import static org.elasticsearch.common.util.CollectionUtils.limitSize; import static org.elasticsearch.health.node.HealthIndicatorDisplayValues.are; import static org.elasticsearch.health.node.HealthIndicatorDisplayValues.getSortedUniqueValuesString; import static org.elasticsearch.health.node.HealthIndicatorDisplayValues.getTruncatedIndices; @@ -68,7 +69,6 @@ public class DiskHealthIndicatorService implements HealthIndicatorService { private static final String IMPACT_INGEST_AT_RISK_ID = "ingest_capability_at_risk"; private static final String IMPACT_CLUSTER_STABILITY_AT_RISK_ID = "cluster_stability_at_risk"; private static final String IMPACT_CLUSTER_FUNCTIONALITY_UNAVAILABLE_ID = 
"cluster_functionality_unavailable"; - private static final String IMPACT_DATA_NODE_WITHOUT_DISK_SPACE = "data_node_without_disk_space"; private final ClusterService clusterService; @@ -82,7 +82,7 @@ public String name() { } @Override - public HealthIndicatorResult calculate(boolean verbose, HealthInfo healthInfo) { + public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo) { Map diskHealthInfoMap = healthInfo.diskInfoByNode(); if (diskHealthInfoMap == null || diskHealthInfoMap.isEmpty()) { /* @@ -107,7 +107,7 @@ public HealthIndicatorResult calculate(boolean verbose, HealthInfo healthInfo) { diskHealthAnalyzer.getSymptom(), diskHealthAnalyzer.getDetails(verbose), diskHealthAnalyzer.getImpacts(), - diskHealthAnalyzer.getDiagnoses() + diskHealthAnalyzer.getDiagnoses(maxAffectedResourcesCount) ); } @@ -344,7 +344,7 @@ List getImpacts() { return impacts; } - private List getDiagnoses() { + private List getDiagnoses(int size) { if (healthStatus == HealthStatus.GREEN) { return List.of(); } @@ -353,7 +353,7 @@ private List getDiagnoses() { Set affectedIndices = Sets.union(blockedIndices, indicesAtRisk); List affectedResources = new ArrayList<>(); if (dataNodes.size() > 0) { - Diagnosis.Resource nodeResources = new Diagnosis.Resource(dataNodes); + Diagnosis.Resource nodeResources = new Diagnosis.Resource(limitSize(dataNodes, size)); affectedResources.add(nodeResources); } if (affectedIndices.size() > 0) { @@ -361,60 +361,24 @@ private List getDiagnoses() { Diagnosis.Resource.Type.INDEX, affectedIndices.stream() .sorted(indicesComparatorByPriorityAndName(clusterState.metadata())) + .limit(Math.min(affectedIndices.size(), size)) .collect(Collectors.toList()) ); affectedResources.add(indexResources); } - if (affectedIndices.size() > 0) { - diagnosisList.add( - new Diagnosis( - new Diagnosis.Definition( - NAME, - "add_disk_capacity_data_nodes", - String.format( - Locale.ROOT, - "%d %s %s on nodes that have run or are likely to run out of disk space, " - + "this can temporarily disable writing on %s %s.", - affectedIndices.size(), - indices(affectedIndices.size()), - regularVerb("reside", affectedIndices.size()), - these(affectedIndices.size()), - indices(affectedIndices.size()) - ), - "Enable autoscaling (if applicable), add disk capacity or free up disk space to resolve " - + "this. If you have already taken action please wait for the rebalancing to complete.", - "https://ela.st/fix-data-disk" - ), - affectedResources - ) - ); - } else { - diagnosisList.add( - new Diagnosis( - new Diagnosis.Definition( - NAME, - "add_disk_capacity_data_nodes", - "Disk is almost full.", - "Enable autoscaling (if applicable), add disk capacity or free up disk space to resolve " - + "this. 
If you have already taken action please wait for the rebalancing to complete.", - "https://ela.st/fix-data-disk" - ), - affectedResources - ) - ); - } + diagnosisList.add(createDataNodeDiagnosis(affectedIndices.size(), affectedResources)); } if (masterNodes.containsKey(HealthStatus.RED)) { - diagnosisList.add(createNonDataNodeDiagnosis(HealthStatus.RED, masterNodes.get(HealthStatus.RED), true)); + diagnosisList.add(createNonDataNodeDiagnosis(HealthStatus.RED, masterNodes.get(HealthStatus.RED), size, true)); } if (masterNodes.containsKey(HealthStatus.YELLOW)) { - diagnosisList.add(createNonDataNodeDiagnosis(HealthStatus.YELLOW, masterNodes.get(HealthStatus.YELLOW), true)); + diagnosisList.add(createNonDataNodeDiagnosis(HealthStatus.YELLOW, masterNodes.get(HealthStatus.YELLOW), size, true)); } if (otherNodes.containsKey(HealthStatus.RED)) { - diagnosisList.add(createNonDataNodeDiagnosis(HealthStatus.RED, otherNodes.get(HealthStatus.RED), false)); + diagnosisList.add(createNonDataNodeDiagnosis(HealthStatus.RED, otherNodes.get(HealthStatus.RED), size, false)); } if (otherNodes.containsKey(HealthStatus.YELLOW)) { - diagnosisList.add(createNonDataNodeDiagnosis(HealthStatus.YELLOW, otherNodes.get(HealthStatus.YELLOW), false)); + diagnosisList.add(createNonDataNodeDiagnosis(HealthStatus.YELLOW, otherNodes.get(HealthStatus.YELLOW), size, false)); } return diagnosisList; } @@ -487,7 +451,35 @@ static Set getIndicesForNodes(List nodes, ClusterState cl .collect(Collectors.toSet()); } - private Diagnosis createNonDataNodeDiagnosis(HealthStatus healthStatus, Collection nodes, boolean isMaster) { + // Visible for testing + static Diagnosis createDataNodeDiagnosis(int numberOfAffectedIndices, List affectedResources) { + String message = numberOfAffectedIndices == 0 + ? "Disk is almost full." + : String.format( + Locale.ROOT, + "%d %s %s on nodes that have run or are likely to run out of disk space, " + + "this can temporarily disable writing on %s %s.", + numberOfAffectedIndices, + indices(numberOfAffectedIndices), + regularVerb("reside", numberOfAffectedIndices), + these(numberOfAffectedIndices), + indices(numberOfAffectedIndices) + ); + return new Diagnosis( + new Diagnosis.Definition( + NAME, + "add_disk_capacity_data_nodes", + message, + "Enable autoscaling (if applicable), add disk capacity or free up disk space to resolve " + + "this. If you have already taken action please wait for the rebalancing to complete.", + "https://ela.st/fix-data-disk" + ), + affectedResources + ); + } + + // Visible for testing + static Diagnosis createNonDataNodeDiagnosis(HealthStatus healthStatus, List nodes, int size, boolean isMaster) { return new Diagnosis( new Diagnosis.Definition( NAME, @@ -496,7 +488,7 @@ private Diagnosis createNonDataNodeDiagnosis(HealthStatus healthStatus, Collecti "Please add capacity to the current nodes, or replace them with ones with higher capacity.", isMaster ? 
"https://ela.st/fix-master-disk" : "https://ela.st/fix-disk-space" ), - List.of(new Diagnosis.Resource(nodes)) + List.of(new Diagnosis.Resource(limitSize(nodes, size))) ); } diff --git a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java index 857dda9f0d46..9d034bb3c249 100644 --- a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java +++ b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.DiskUsage; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; @@ -159,7 +160,7 @@ private void startMonitoringIfNecessary() { ); logger.debug("Local health monitoring started {}", monitoring); } else { - logger.debug("Local health monitoring already started {}, skipping", monitoring); + logger.trace("Local health monitoring already started {}, skipping", monitoring); } } } @@ -433,9 +434,18 @@ DiskHealthInfo getHealth(HealthMetadata healthMetadata, ClusterState clusterStat } long highThreshold = diskMetadata.getFreeBytesHighWatermark(totalBytes).getBytes(); - if (usage.getFreeBytes() < highThreshold && hasRelocatingShards(clusterState, node.getId()) == false) { - logger.debug("High disk watermark [{}] exceeded on {}", highThreshold, usage); - return new DiskHealthInfo(HealthStatus.YELLOW, DiskHealthInfo.Cause.NODE_OVER_HIGH_THRESHOLD); + if (usage.getFreeBytes() < highThreshold) { + if (node.canContainData()) { + // for data nodes only report YELLOW if shards can't move away from the node + if (DiskCheck.hasRelocatingShards(clusterState, node) == false) { + logger.debug("High disk watermark [{}] exceeded on {}", highThreshold, usage); + return new DiskHealthInfo(HealthStatus.YELLOW, DiskHealthInfo.Cause.NODE_OVER_HIGH_THRESHOLD); + } + } else { + // for non-data nodes report YELLOW when the disk high watermark is breached + logger.debug("High disk watermark [{}] exceeded on {}", highThreshold, usage); + return new DiskHealthInfo(HealthStatus.YELLOW, DiskHealthInfo.Cause.NODE_OVER_HIGH_THRESHOLD); + } } return new DiskHealthInfo(HealthStatus.GREEN); } @@ -461,8 +471,13 @@ private DiskUsage getDiskUsage() { return DiskUsage.findLeastAvailablePath(nodeStats); } - private boolean hasRelocatingShards(ClusterState clusterState, String nodeId) { - return clusterState.getRoutingNodes().node(nodeId).shardsWithState(ShardRoutingState.RELOCATING).isEmpty() == false; + static boolean hasRelocatingShards(ClusterState clusterState, DiscoveryNode node) { + RoutingNode routingNode = clusterState.getRoutingNodes().node(node.getId()); + if (routingNode == null) { + // routing node will be null for non-data nodes + return false; + } + return routingNode.numberOfShardsWithState(ShardRoutingState.RELOCATING) > 0; } } } diff --git a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskParams.java b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskParams.java index 888d1f2dae95..4a68115af634 100644 --- a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskParams.java +++ b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskParams.java @@ -8,7 +8,7 @@ 
package org.elasticsearch.health.node.selection; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.persistent.PersistentTaskParams; @@ -46,8 +46,8 @@ public String getWriteableName() { } @Override - public Version getMinimalSupportedVersion() { - return Version.V_8_5_0; + public TransportVersion getMinimalSupportedVersion() { + return TransportVersion.V_8_5_0; } @Override diff --git a/server/src/main/java/org/elasticsearch/health/stats/HealthApiStats.java b/server/src/main/java/org/elasticsearch/health/stats/HealthApiStats.java index e6b6fedec296..99a1a4dce4b5 100644 --- a/server/src/main/java/org/elasticsearch/health/stats/HealthApiStats.java +++ b/server/src/main/java/org/elasticsearch/health/stats/HealthApiStats.java @@ -15,10 +15,6 @@ import org.elasticsearch.health.HealthIndicatorResult; import org.elasticsearch.health.HealthStatus; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; import java.util.function.BiFunction; import java.util.function.Function; @@ -43,9 +39,6 @@ public class HealthApiStats { diagnosis ); - private final Set statuses = ConcurrentHashMap.newKeySet(); - private final ConcurrentMap> indicators = new ConcurrentHashMap<>(); - private final ConcurrentMap> diagnoses = new ConcurrentHashMap<>(); private final Counters stats = new Counters(TOTAL_INVOCATIONS); public HealthApiStats() {} @@ -65,19 +58,15 @@ public void track(boolean verbose, GetHealthAction.Response response) { : response.getIndicatorResults().stream().map(HealthIndicatorResult::status).findFirst().orElse(null); if (status != null) { stats.inc(statusLabel.apply(status)); - statuses.add(status); } if (status != HealthStatus.GREEN) { for (HealthIndicatorResult indicator : response.getIndicatorResults()) { if (indicator.status() != HealthStatus.GREEN) { stats.inc(indicatorLabel.apply(indicator.status(), indicator.name())); - indicators.computeIfAbsent(indicator.status(), k -> ConcurrentHashMap.newKeySet()).add(indicator.name()); if (indicator.diagnosisList() != null) { for (Diagnosis diagnosis : indicator.diagnosisList()) { stats.inc(diagnosisLabel.apply(indicator.status(), diagnosis.definition().getUniqueId())); - diagnoses.computeIfAbsent(indicator.status(), k -> ConcurrentHashMap.newKeySet()) - .add(diagnosis.definition().getUniqueId()); } } } @@ -92,16 +81,4 @@ public boolean hasCounters() { public Counters getStats() { return stats; } - - public Map> getIndicators() { - return Map.copyOf(indicators); - } - - public Map> getDiagnoses() { - return Map.copyOf(diagnoses); - } - - public Set getStatuses() { - return Set.copyOf(statuses); - } } diff --git a/server/src/main/java/org/elasticsearch/health/stats/HealthApiStatsAction.java b/server/src/main/java/org/elasticsearch/health/stats/HealthApiStatsAction.java index 237fd6bf6d5a..f2df53cd2327 100644 --- a/server/src/main/java/org/elasticsearch/health/stats/HealthApiStatsAction.java +++ b/server/src/main/java/org/elasticsearch/health/stats/HealthApiStatsAction.java @@ -20,16 +20,11 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.metrics.Counters; import org.elasticsearch.core.Nullable; -import org.elasticsearch.health.HealthStatus; import org.elasticsearch.transport.TransportRequest; import java.io.IOException; -import java.util.HashMap; -import java.util.HashSet; 
import java.util.List; -import java.util.Map; import java.util.Objects; -import java.util.Set; /** * This class collects the stats of the health API from every node @@ -113,47 +108,13 @@ public Counters getStats() { return Counters.merge(counters); } - public Set getStatuses() { - Set statuses = new HashSet<>(); - for (Node node : getNodes()) { - statuses.addAll(node.statuses); - } - return statuses; - } - - public Map> getIndicators() { - Map> indicators = new HashMap<>(); - for (Node node : getNodes()) { - for (HealthStatus status : node.indicators.keySet()) { - indicators.computeIfAbsent(status, s -> new HashSet<>()).addAll(node.indicators.get(status)); - } - } - return indicators; - } - - public Map> getDiagnoses() { - Map> diagnoses = new HashMap<>(); - for (Node node : getNodes()) { - for (HealthStatus status : node.diagnoses.keySet()) { - diagnoses.computeIfAbsent(status, s -> new HashSet<>()).addAll(node.diagnoses.get(status)); - } - } - return diagnoses; - } - public static class Node extends BaseNodeResponse { @Nullable private Counters stats; - private Set statuses = Set.of(); - private Map> indicators = Map.of(); - private Map> diagnoses = Map.of(); public Node(StreamInput in) throws IOException { super(in); stats = in.readOptionalWriteable(Counters::new); - statuses = in.readSet(HealthStatus::read); - indicators = in.readMap(HealthStatus::read, input -> input.readSet(StreamInput::readString)); - diagnoses = in.readMap(HealthStatus::read, input -> input.readSet(StreamInput::readString)); } public Node(DiscoveryNode node) { @@ -168,25 +129,10 @@ public void setStats(Counters stats) { this.stats = stats; } - public void setStatuses(Set statuses) { - this.statuses = statuses; - } - - public void setIndicators(Map> indicators) { - this.indicators = indicators; - } - - public void setDiagnoses(Map> diagnoses) { - this.diagnoses = diagnoses; - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalWriteable(stats); - out.writeCollection(statuses); - out.writeMap(indicators, StreamOutput::writeEnum, (o, v) -> o.writeCollection(v, StreamOutput::writeString)); - out.writeMap(diagnoses, StreamOutput::writeEnum, (o, v) -> o.writeCollection(v, StreamOutput::writeString)); } } } diff --git a/server/src/main/java/org/elasticsearch/health/stats/HealthApiStatsTransportAction.java b/server/src/main/java/org/elasticsearch/health/stats/HealthApiStatsTransportAction.java index 946fcb1addbf..b5377e2a98cd 100644 --- a/server/src/main/java/org/elasticsearch/health/stats/HealthApiStatsTransportAction.java +++ b/server/src/main/java/org/elasticsearch/health/stats/HealthApiStatsTransportAction.java @@ -79,9 +79,6 @@ protected HealthApiStatsAction.Response.Node nodeOperation(HealthApiStatsAction. 
if (healthApiStats.hasCounters()) { statsResponse.setStats(healthApiStats.getStats()); } - statsResponse.setStatuses(healthApiStats.getStatuses()); - statsResponse.setIndicators(healthApiStats.getIndicators()); - statsResponse.setDiagnoses(healthApiStats.getDiagnoses()); return statsResponse; } } diff --git a/server/src/main/java/org/elasticsearch/http/HttpServerTransport.java b/server/src/main/java/org/elasticsearch/http/HttpServerTransport.java index 42eaa0a38445..b2528cf9f87a 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpServerTransport.java +++ b/server/src/main/java/org/elasticsearch/http/HttpServerTransport.java @@ -17,6 +17,7 @@ public interface HttpServerTransport extends LifecycleComponent, ReportingService { + String HTTP_PROFILE_NAME = ".http"; String HTTP_SERVER_WORKER_THREAD_NAME_PREFIX = "http_server_worker"; BoundTransportAddress boundAddress(); diff --git a/server/src/main/java/org/elasticsearch/http/HttpTracer.java b/server/src/main/java/org/elasticsearch/http/HttpTracer.java index 2978f00d7a16..9e879c44aea6 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpTracer.java +++ b/server/src/main/java/org/elasticsearch/http/HttpTracer.java @@ -16,6 +16,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestUtils; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -33,6 +34,11 @@ class HttpTracer { private volatile String[] tracerLogInclude; private volatile String[] tracerLogExclude; + // for testing + HttpTracer() { + tracerLogInclude = tracerLogExclude = new String[0]; + } + HttpTracer(Settings settings, ClusterSettings clusterSettings) { setTracerLogInclude(HttpTransportSettings.SETTING_HTTP_TRACE_LOG_INCLUDE.get(settings)); @@ -55,14 +61,17 @@ class HttpTracer { @Nullable HttpTracer maybeLogRequest(RestRequest restRequest, @Nullable Exception e) { if (logger.isTraceEnabled() && TransportService.shouldTraceAction(restRequest.uri(), tracerLogInclude, tracerLogExclude)) { + // trace.id in the response log is included from threadcontext, which isn't set at request log time + // so include it here as part of the message logger.trace( () -> format( - "[%s][%s][%s][%s] received request from [%s]", + "[%s][%s][%s][%s] received request from [%s]%s", restRequest.getRequestId(), restRequest.header(Task.X_OPAQUE_ID_HTTP_HEADER), restRequest.method(), restRequest.uri(), - restRequest.getHttpChannel() + restRequest.getHttpChannel(), + RestUtils.extractTraceId(restRequest.header(Task.TRACE_PARENT_HTTP_HEADER)).map(t -> " trace.id: " + t).orElse("") ), e ); @@ -89,6 +98,7 @@ void logResponse( long requestId, boolean success ) { + // trace id is included in the ThreadContext for the response logger.trace( () -> format( "[%s][%s][%s][%s][%s] sent response to [%s] success [%s]", diff --git a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java index e6ec4b98f189..0c8248313469 100644 --- a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java +++ b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java @@ -9,17 +9,22 @@ package org.elasticsearch.index; import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.cluster.routing.ShardRouting; import 
org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason; +import org.elasticsearch.threadpool.ThreadPool; import java.util.Collection; +import java.util.Iterator; import java.util.List; import static org.elasticsearch.core.Strings.format; @@ -238,16 +243,53 @@ public void onStoreClosed(ShardId shardId) { } } - @Override - public void beforeIndexShardRecovery(final IndexShard indexShard, final IndexSettings indexSettings) { - for (IndexEventListener listener : listeners) { + private void iterateBeforeIndexShardRecovery( + final IndexShard indexShard, + final IndexSettings indexSettings, + final Iterator iterator, + final ActionListener outerListener + ) { + while (iterator.hasNext()) { + final var nextListener = iterator.next(); + final var future = new ListenableFuture(); try { - listener.beforeIndexShardRecovery(indexShard, indexSettings); + nextListener.beforeIndexShardRecovery(indexShard, indexSettings, future); + if (future.isDone()) { + // common case, not actually async, so just check for an exception and continue on the same thread + future.get(); + continue; + } } catch (Exception e) { - logger.warn(() -> format("failed to invoke the listener before the shard recovery starts for %s", indexShard.shardId()), e); - throw e; + outerListener.onFailure(e); + return; } + + // future was not completed straight away, but might be done by now, so continue on a fresh thread to avoid stack overflow + future.addListener( + outerListener.delegateFailure( + (delegate, v) -> indexShard.getThreadPool() + .executor(ThreadPool.Names.GENERIC) + .execute( + ActionRunnable.wrap(delegate, l -> iterateBeforeIndexShardRecovery(indexShard, indexSettings, iterator, l)) + ) + ) + ); + return; } + + outerListener.onResponse(null); + } + + @Override + public void beforeIndexShardRecovery( + final IndexShard indexShard, + final IndexSettings indexSettings, + final ActionListener outerListener + ) { + iterateBeforeIndexShardRecovery(indexShard, indexSettings, listeners.iterator(), outerListener.delegateResponse((l, e) -> { + logger.warn(() -> format("failed to invoke the listener before the shard recovery starts for %s", indexShard.shardId()), e); + l.onFailure(e); + })); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index 71a824535392..b46b8334408f 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -26,6 +26,7 @@ import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.mapper.ProvidedIdFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; +import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.index.mapper.TsidExtractingIdFieldMapper; @@ -110,6 +111,9 @@ public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { public boolean shouldValidateTimestamp() { return false; } + + @Override + public void 
validateSourceFieldMapper(SourceFieldMapper sourceFieldMapper) {} }, TIME_SERIES("time_series") { @Override @@ -196,6 +200,13 @@ public DocumentDimensions buildDocumentDimensions(IndexSettings settings) { public boolean shouldValidateTimestamp() { return true; } + + @Override + public void validateSourceFieldMapper(SourceFieldMapper sourceFieldMapper) { + if (sourceFieldMapper.isSynthetic() == false) { + throw new IllegalArgumentException("time series indices only support synthetic source"); + } + } }; protected static String tsdbMode() { @@ -310,6 +321,11 @@ public String getName() { */ public abstract boolean shouldValidateTimestamp(); + /** + * Validates the source field mapper + */ + public abstract void validateSourceFieldMapper(SourceFieldMapper sourceFieldMapper); + /** * Parse a string into an {@link IndexMode}. */ diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java index 8fcd4a29c6b0..9f2e1c76e24d 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/server/src/main/java/org/elasticsearch/index/IndexModule.java @@ -23,6 +23,7 @@ import org.elasticsearch.Version; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -33,6 +34,7 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Nullable; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.analysis.IndexAnalyzers; @@ -44,9 +46,11 @@ import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MapperRegistry; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.SearchOperationListener; +import org.elasticsearch.index.shard.ShardPath; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.FsDirectoryFactory; import org.elasticsearch.indices.IndicesQueryCache; @@ -147,7 +151,17 @@ public final class IndexModule { * created by {@link org.elasticsearch.plugins.IndexStorePlugin.DirectoryFactory}. 
*/ @FunctionalInterface - public interface DirectoryWrapper extends CheckedFunction {} + public interface DirectoryWrapper { + /** + * Wrap a given {@link Directory} + * + * @param directory the {@link Directory} to wrap + * @param shardRouting the {@link ShardRouting} associated with the {@link Directory} or {@code null} is unknown + * @return a {@link Directory} + * @throws IOException + */ + Directory wrap(Directory directory, @Nullable ShardRouting shardRouting) throws IOException; + } private final IndexSettings indexSettings; private final AnalysisRegistry analysisRegistry; @@ -165,6 +179,9 @@ public interface DirectoryWrapper extends CheckedFunction recoveryStateFactories; + private final SetOnce indexCommitListener = new SetOnce<>(); + + private final SetOnce replicationTrackerFactory = new SetOnce<>(); /** * Construct the index module for the index with the specified index settings. The index module contains extension points for plugins @@ -370,6 +387,16 @@ public void setDirectoryWrapper(DirectoryWrapper wrapper) { this.indexDirectoryWrapper.set(Objects.requireNonNull(wrapper)); } + public void setIndexCommitListener(Engine.IndexCommitListener listener) { + ensureNotFrozen(); + this.indexCommitListener.set(Objects.requireNonNull(listener)); + } + + public void setReplicationTrackerFactory(ReplicationTracker.Factory factory) { + ensureNotFrozen(); + this.replicationTrackerFactory.set(factory); + } + IndexEventListener freeze() { // pkg private for testing if (this.frozen.compareAndSet(false, true)) { return new CompositeIndexEventListener(indexSettings, indexEventListeners); @@ -517,7 +544,9 @@ public IndexService newIndexService( valuesSourceRegistry, recoveryStateFactory, indexFoldersDeletionListener, - snapshotCommitSupplier + snapshotCommitSupplier, + indexCommitListener.get(), + Objects.requireNonNullElse(replicationTrackerFactory.get(), ReplicationTracker.DEFAULT_FACTORY) ); success = true; return indexService; @@ -559,7 +588,18 @@ private IndexStorePlugin.DirectoryFactory getDirectoryFactory( final DirectoryWrapper directoryWrapper = this.indexDirectoryWrapper.get(); assert frozen.get() : "IndexModule configuration not frozen"; if (directoryWrapper != null) { - return (idxSettings, shardPath) -> directoryWrapper.apply(factory.newDirectory(idxSettings, shardPath)); + return new IndexStorePlugin.DirectoryFactory() { + @Override + public Directory newDirectory(IndexSettings indexSettings, ShardPath shardPath) throws IOException { + return newDirectory(indexSettings, shardPath, null); + } + + @Override + public Directory newDirectory(IndexSettings indexSettings, ShardPath shardPath, ShardRouting shardRouting) + throws IOException { + return directoryWrapper.wrap(factory.newDirectory(indexSettings, shardPath, shardRouting), shardRouting); + } + }; } return factory; } diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index a107efaa9764..f50979aa82d2 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -54,6 +54,7 @@ import org.elasticsearch.index.mapper.NodeMappingStats; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.query.SearchIndexNameMatcher; +import org.elasticsearch.index.seqno.ReplicationTracker; import org.elasticsearch.index.seqno.RetentionLeaseSyncer; import org.elasticsearch.index.shard.IndexEventListener; import 
org.elasticsearch.index.shard.IndexShard; @@ -112,6 +113,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final IndexStorePlugin.RecoveryStateFactory recoveryStateFactory; private final IndexStorePlugin.SnapshotCommitSupplier snapshotCommitSupplier; private final CheckedFunction readerWrapper; + private final Engine.IndexCommitListener indexCommitListener; private final IndexCache indexCache; private final MapperService mapperService; private final XContentParserConfiguration parserConfiguration; @@ -145,6 +147,8 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final Supplier indexSortSupplier; private final ValuesSourceRegistry valuesSourceRegistry; + private final ReplicationTracker.Factory replicationTrackerFactory; + public IndexService( IndexSettings indexSettings, IndexCreationContext indexCreationContext, @@ -175,7 +179,9 @@ public IndexService( ValuesSourceRegistry valuesSourceRegistry, IndexStorePlugin.RecoveryStateFactory recoveryStateFactory, IndexStorePlugin.IndexFoldersDeletionListener indexFoldersDeletionListener, - IndexStorePlugin.SnapshotCommitSupplier snapshotCommitSupplier + IndexStorePlugin.SnapshotCommitSupplier snapshotCommitSupplier, + Engine.IndexCommitListener indexCommitListener, + ReplicationTracker.Factory replicationTrackerFactory ) { super(indexSettings); this.allowExpensiveQueries = allowExpensiveQueries; @@ -242,6 +248,7 @@ public IndexService( this.readerWrapper = wrapperFactory.apply(this); this.searchOperationListeners = Collections.unmodifiableList(searchOperationListeners); this.indexingOperationListeners = Collections.unmodifiableList(indexingOperationListeners); + this.indexCommitListener = indexCommitListener; try (var ignored = threadPool.getThreadContext().clearTraceContext()) { // kick off async ops for the first shard in this index this.refreshTask = new AsyncRefreshTask(this); @@ -249,6 +256,7 @@ public IndexService( this.globalCheckpointTask = new AsyncGlobalCheckpointTask(this); this.retentionLeaseSyncTask = new AsyncRetentionLeaseSyncTask(this); } + this.replicationTrackerFactory = replicationTrackerFactory; updateFsyncTaskIfNecessary(); } @@ -300,8 +308,10 @@ public IndexShard getShard(int shardId) { } public NodeMappingStats getNodeMappingStats() { + if (mapperService == null) { + return null; + } long totalCount = mapperService().mappingLookup().getTotalFieldsCount(); - Index index = index(); long totalEstimatedOverhead = totalCount * 1024L; // 1KiB estimated per mapping NodeMappingStats indexNodeMappingStats = new NodeMappingStats(totalCount, totalEstimatedOverhead); return indexNodeMappingStats; @@ -484,7 +494,7 @@ public synchronized IndexShard createShard( warmer.warm(reader, shard, IndexService.this.indexSettings); } }; - Directory directory = directoryFactory.newDirectory(this.indexSettings, path); + final Directory directory = directoryFactory.newDirectory(this.indexSettings, path, routing); store = new Store( shardId, this.indexSettings, @@ -514,7 +524,9 @@ public synchronized IndexShard createShard( retentionLeaseSyncer, circuitBreakerService, snapshotCommitSupplier, - System::nanoTime + System::nanoTime, + indexCommitListener, + replicationTrackerFactory ); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.afterIndexShardCreated(indexShard); diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 
dfbfb47937b2..8dd27a2344c1 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -251,9 +251,15 @@ public final class IndexSettings { Property.IndexScope ); public static final TimeValue DEFAULT_REFRESH_INTERVAL = new TimeValue(1, TimeUnit.SECONDS); + public static final Setting NODE_DEFAULT_REFRESH_INTERVAL_SETTING = Setting.timeSetting( + "node._internal.default_refresh_interval", + DEFAULT_REFRESH_INTERVAL, + new TimeValue(-1, TimeUnit.MILLISECONDS), + Property.NodeScope + ); public static final Setting INDEX_REFRESH_INTERVAL_SETTING = Setting.timeSetting( "index.refresh_interval", - DEFAULT_REFRESH_INTERVAL, + NODE_DEFAULT_REFRESH_INTERVAL_SETTING, new TimeValue(-1, TimeUnit.MILLISECONDS), Property.Dynamic, Property.IndexScope @@ -774,7 +780,10 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti mappingDimensionFieldsLimit = scopedSettings.get(INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING); indexRouting = IndexRouting.fromIndexMetadata(indexMetadata); - scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio); + scopedSettings.addSettingsUpdateConsumer( + MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, + mergePolicyConfig::setCompoundFormatThreshold + ); scopedSettings.addSettingsUpdateConsumer( MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, mergePolicyConfig::setDeletesPctAllowed diff --git a/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java b/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java index 12d84a57b359..80ae79c0f2a5 100644 --- a/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java +++ b/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java @@ -71,8 +71,8 @@ * * Controls the maximum percentage of deleted documents that is tolerated in * the index. Lower values make the index more space efficient at the - * expense of increased CPU and I/O activity. Values must be between 20 and - * 50. Default value is 33. + * expense of increased CPU and I/O activity. Values must be between 5 and + * 50. Default value is 20. * * *
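(The MergePolicyConfig hunks that follow replace the old noCFSRatio double with a CompoundFileThreshold that also accepts a byte size, and the default for index.compound_format becomes "1gb". The sketch below shows what each accepted value does to the underlying Lucene merge policy, assuming the setter calls mirror CompoundFileThreshold.configure further down; the helper class itself is illustrative only.)

import org.apache.lucene.index.TieredMergePolicy;
import org.elasticsearch.common.unit.ByteSizeValue;

class CompoundFormatExamples {

    // index.compound_format: "1gb" (the new default) writes compound files only for segments up to 1gb
    static void byteSizeThreshold(TieredMergePolicy mergePolicy) {
        mergePolicy.setNoCFSRatio(1.0);
        mergePolicy.setMaxCFSSegmentSizeMB(ByteSizeValue.parseBytesSizeValue("1gb", "index.compound_format").getMbFrac());
    }

    // index.compound_format: "0.3" writes compound files only for merges up to 30% of the index;
    // "true" corresponds to a ratio of 1.0 and "false" to 0.0, with no size cap
    static void ratioThreshold(TieredMergePolicy mergePolicy) {
        mergePolicy.setNoCFSRatio(0.3);
        mergePolicy.setMaxCFSSegmentSizeMB(Double.POSITIVE_INFINITY);
    }
}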

    @@ -110,11 +110,12 @@ public final class MergePolicyConfig { public static final int DEFAULT_MAX_MERGE_AT_ONCE = 10; public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB); public static final double DEFAULT_SEGMENTS_PER_TIER = 10.0d; - public static final double DEFAULT_DELETES_PCT_ALLOWED = 33.0d; - public static final Setting INDEX_COMPOUND_FORMAT_SETTING = new Setting<>( - "index.compound_format", - Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO), - MergePolicyConfig::parseNoCFSRatio, + public static final double DEFAULT_DELETES_PCT_ALLOWED = 20.0d; + private static final String INDEX_COMPOUND_FORMAT_SETTING_KEY = "index.compound_format"; + public static final Setting INDEX_COMPOUND_FORMAT_SETTING = new Setting<>( + INDEX_COMPOUND_FORMAT_SETTING_KEY, + "1gb", + MergePolicyConfig::parseCompoundFormat, Property.Dynamic, Property.IndexScope ); @@ -163,7 +164,7 @@ public final class MergePolicyConfig { public static final Setting INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING = Setting.doubleSetting( "index.merge.policy.deletes_pct_allowed", DEFAULT_DELETES_PCT_ALLOWED, - 20.0d, + 5.0d, 50.0d, Property.Dynamic, Property.IndexScope @@ -189,7 +190,7 @@ public final class MergePolicyConfig { ); } maxMergeAtOnce = adjustMaxMergeAtOnceIfNeeded(maxMergeAtOnce, segmentsPerTier); - mergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_COMPOUND_FORMAT_SETTING)); + indexSettings.getValue(INDEX_COMPOUND_FORMAT_SETTING).configure(mergePolicy); mergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed); mergePolicy.setFloorSegmentMB(floorSegment.getMbFrac()); mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); @@ -229,8 +230,8 @@ void setExpungeDeletesAllowed(Double value) { mergePolicy.setForceMergeDeletesPctAllowed(value); } - void setNoCFSRatio(Double noCFSRatio) { - mergePolicy.setNoCFSRatio(noCFSRatio); + void setCompoundFormatThreshold(CompoundFileThreshold compoundFileThreshold) { + compoundFileThreshold.configure(mergePolicy); } void setDeletesPctAllowed(Double deletesPctAllowed) { @@ -261,25 +262,81 @@ MergePolicy getMergePolicy() { return mergesEnabled ? 
mergePolicy : NoMergePolicy.INSTANCE; } - private static double parseNoCFSRatio(String noCFSRatio) { + private static CompoundFileThreshold parseCompoundFormat(String noCFSRatio) { noCFSRatio = noCFSRatio.trim(); if (noCFSRatio.equalsIgnoreCase("true")) { - return 1.0d; + return new CompoundFileThreshold(1.0d); } else if (noCFSRatio.equalsIgnoreCase("false")) { - return 0.0; + return new CompoundFileThreshold(0.0d); } else { try { - double value = Double.parseDouble(noCFSRatio); - if (value < 0.0 || value > 1.0) { - throw new IllegalArgumentException("NoCFSRatio must be in the interval [0..1] but was: [" + value + "]"); + try { + return new CompoundFileThreshold(Double.parseDouble(noCFSRatio)); + } catch (NumberFormatException ex) { + throw new IllegalArgumentException( + "index.compound_format must be a boolean, a non-negative byte size or a ratio in the interval [0..1] but was: [" + + noCFSRatio + + "]", + ex + ); } - return value; - } catch (NumberFormatException ex) { + } catch (IllegalArgumentException e) { + try { + return new CompoundFileThreshold(ByteSizeValue.parseBytesSizeValue(noCFSRatio, INDEX_COMPOUND_FORMAT_SETTING_KEY)); + } catch (RuntimeException e2) { + e.addSuppressed(e2); + } + throw e; + } + } + } + + public static class CompoundFileThreshold { + private Double noCFSRatio; + private ByteSizeValue noCFSSize; + + private CompoundFileThreshold(double noCFSRatio) { + if (noCFSRatio < 0.0 || noCFSRatio > 1.0) { throw new IllegalArgumentException( - "Expected a boolean or a value in the interval [0..1] but was: " + "[" + noCFSRatio + "]", - ex + "index.compound_format must be a boolean, a non-negative byte size or a ratio in the interval [0..1] but was: [" + + noCFSRatio + + "]" ); } + this.noCFSRatio = noCFSRatio; + this.noCFSSize = null; + } + + private CompoundFileThreshold(ByteSizeValue noCFSSize) { + if (noCFSSize.getBytes() < 0) { + throw new IllegalArgumentException( + "index.compound_format must be a boolean, a non-negative byte size or a ratio in the interval [0..1] but was: [" + + noCFSSize + + "]" + ); + } + this.noCFSRatio = null; + this.noCFSSize = noCFSSize; + } + + void configure(MergePolicy mergePolicy) { + if (noCFSRatio != null) { + assert noCFSSize == null; + mergePolicy.setNoCFSRatio(noCFSRatio); + mergePolicy.setMaxCFSSegmentSizeMB(Double.POSITIVE_INFINITY); + } else { + mergePolicy.setNoCFSRatio(1.0); + mergePolicy.setMaxCFSSegmentSizeMB(noCFSSize.getMbFrac()); + } + } + + @Override + public String toString() { + if (noCFSRatio != null) { + return "max CFS ratio: " + noCFSRatio; + } else { + return "max CFS size: " + noCFSSize; + } } } } diff --git a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index 266d84f394ac..f1f03eff88d0 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -43,6 +43,7 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardUtils; +import org.elasticsearch.lucene.util.BitSets; import org.elasticsearch.threadpool.ThreadPool; import java.io.Closeable; @@ -104,7 +105,7 @@ public static BitSet bitsetFromQuery(Query query, LeafReaderContext context) thr if (s == null) { return null; } else { - return BitSet.of(s.iterator(), context.reader().maxDoc()); + return BitSets.of(s.iterator(), 
context.reader().maxDoc()); } } diff --git a/server/src/main/java/org/elasticsearch/index/codec/CodecService.java b/server/src/main/java/org/elasticsearch/index/codec/CodecService.java index 0daf88610fbe..990d44f5baef 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/CodecService.java +++ b/server/src/main/java/org/elasticsearch/index/codec/CodecService.java @@ -9,7 +9,7 @@ package org.elasticsearch.index.codec; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene94.Lucene94Codec; +import org.apache.lucene.codecs.lucene95.Lucene95Codec; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.mapper.MapperService; @@ -35,11 +35,11 @@ public class CodecService { public CodecService(@Nullable MapperService mapperService, BigArrays bigArrays) { final var codecs = new HashMap(); if (mapperService == null) { - codecs.put(DEFAULT_CODEC, new Lucene94Codec()); - codecs.put(BEST_COMPRESSION_CODEC, new Lucene94Codec(Lucene94Codec.Mode.BEST_COMPRESSION)); + codecs.put(DEFAULT_CODEC, new Lucene95Codec()); + codecs.put(BEST_COMPRESSION_CODEC, new Lucene95Codec(Lucene95Codec.Mode.BEST_COMPRESSION)); } else { - codecs.put(DEFAULT_CODEC, new PerFieldMapperCodec(Lucene94Codec.Mode.BEST_SPEED, mapperService, bigArrays)); - codecs.put(BEST_COMPRESSION_CODEC, new PerFieldMapperCodec(Lucene94Codec.Mode.BEST_COMPRESSION, mapperService, bigArrays)); + codecs.put(DEFAULT_CODEC, new PerFieldMapperCodec(Lucene95Codec.Mode.BEST_SPEED, mapperService, bigArrays)); + codecs.put(BEST_COMPRESSION_CODEC, new PerFieldMapperCodec(Lucene95Codec.Mode.BEST_COMPRESSION, mapperService, bigArrays)); } codecs.put(LUCENE_DEFAULT_CODEC, Codec.getDefault()); for (String codec : Codec.availableCodecs()) { diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java index 268ba302033f..117276858571 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java +++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java @@ -13,14 +13,20 @@ import org.apache.lucene.codecs.KnnVectorsFormat; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.lucene90.Lucene90DocValuesFormat; -import org.apache.lucene.codecs.lucene94.Lucene94Codec; +import org.apache.lucene.codecs.lucene95.Lucene95Codec; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.codec.bloomfilter.ES85BloomFilterPostingsFormat; +import org.elasticsearch.index.codec.bloomfilter.ES87BloomFilterPostingsFormat; +import org.elasticsearch.index.codec.tsdb.ES87TSDBDocValuesFormat; import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.MappingLookup; +import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.TimeSeriesParams; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; /** @@ -31,11 +37,12 @@ * per index in real time via the mapping API. If no specific postings format or vector format is * configured for a specific field the default postings or vector format is used. 
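As background for the PerFieldMapperCodec hunk that follows, a minimal sketch of the Lucene per-field hook the class overrides (illustrative only, not from this patch; the field name and the stand-in format are assumptions):

    import org.apache.lucene.codecs.PostingsFormat;
    import org.apache.lucene.codecs.lucene95.Lucene95Codec;

    // A codec subclass may pick a PostingsFormat per field name; Elasticsearch's
    // PerFieldMapperCodec overrides the same hook but consults the field mappings.
    class ExamplePerFieldCodec extends Lucene95Codec {
        private final PostingsFormat idFormat = PostingsFormat.forName("Lucene90"); // stand-in format

        @Override
        public PostingsFormat getPostingsFormatForField(String field) {
            return "_id".equals(field) ? idFormat : super.getPostingsFormatForField(field);
        }
    }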
*/ -public class PerFieldMapperCodec extends Lucene94Codec { - private final MapperService mapperService; +public class PerFieldMapperCodec extends Lucene95Codec { + private final MapperService mapperService; private final DocValuesFormat docValuesFormat = new Lucene90DocValuesFormat(); - private final ES85BloomFilterPostingsFormat bloomFilterPostingsFormat; + private final ES87BloomFilterPostingsFormat bloomFilterPostingsFormat; + private final ES87TSDBDocValuesFormat tsdbDocValuesFormat; static { assert Codec.forName(Lucene.LATEST_CODEC).getClass().isAssignableFrom(PerFieldMapperCodec.class) @@ -45,7 +52,8 @@ public class PerFieldMapperCodec extends Lucene94Codec { public PerFieldMapperCodec(Mode compressionMode, MapperService mapperService, BigArrays bigArrays) { super(compressionMode); this.mapperService = mapperService; - this.bloomFilterPostingsFormat = new ES85BloomFilterPostingsFormat(bigArrays, this::internalGetPostingsFormatForField); + this.bloomFilterPostingsFormat = new ES87BloomFilterPostingsFormat(bigArrays, this::internalGetPostingsFormatForField); + this.tsdbDocValuesFormat = new ES87TSDBDocValuesFormat(); } @Override @@ -64,10 +72,19 @@ private PostingsFormat internalGetPostingsFormatForField(String field) { return super.getPostingsFormatForField(field); } - private boolean useBloomFilter(String field) { - return IdFieldMapper.NAME.equals(field) - && mapperService.mappingLookup().isDataStreamTimestampFieldEnabled() == false - && IndexSettings.BLOOM_FILTER_ID_FIELD_ENABLED_SETTING.get(mapperService.getIndexSettings().getSettings()); + boolean useBloomFilter(String field) { + IndexSettings indexSettings = mapperService.getIndexSettings(); + if (mapperService.mappingLookup().isDataStreamTimestampFieldEnabled()) { + // In case for time series indices, they _id isn't randomly generated, + // but based on dimension fields and timestamp field, so during indexing + // version/seq_no/term needs to be looked up and having a bloom filter + // can speed this up significantly. 
+ return indexSettings.getMode() == IndexMode.TIME_SERIES + && IdFieldMapper.NAME.equals(field) + && IndexSettings.BLOOM_FILTER_ID_FIELD_ENABLED_SETTING.get(indexSettings.getSettings()); + } else { + return IdFieldMapper.NAME.equals(field) && IndexSettings.BLOOM_FILTER_ID_FIELD_ENABLED_SETTING.get(indexSettings.getSettings()); + } } @Override @@ -84,6 +101,39 @@ public KnnVectorsFormat getKnnVectorsFormatForField(String field) { @Override public DocValuesFormat getDocValuesFormatForField(String field) { + if (useTSDBDocValuesFormat(field)) { + return tsdbDocValuesFormat; + } return docValuesFormat; } + + private boolean useTSDBDocValuesFormat(final String field) { + return IndexSettings.isTimeSeriesModeEnabled() + && isTimeSeriesModeIndex() + && isNotSpecialField(field) + && (isCounterMetricType(field) || isTimestampField(field)); + } + + private boolean isTimeSeriesModeIndex() { + return IndexMode.TIME_SERIES.equals(mapperService.getIndexSettings().getMode()); + } + + private boolean isCounterMetricType(String field) { + if (mapperService != null) { + final MappingLookup mappingLookup = mapperService.mappingLookup(); + if (mappingLookup.getMapper(field) instanceof NumberFieldMapper) { + final MappedFieldType fieldType = mappingLookup.getFieldType(field); + return TimeSeriesParams.MetricType.COUNTER.equals(fieldType.getMetricType()); + } + } + return false; + } + + private boolean isTimestampField(String field) { + return "@timestamp".equals(field); + } + + private boolean isNotSpecialField(String field) { + return field.startsWith("_") == false; + } } diff --git a/server/src/main/java/org/elasticsearch/index/codec/bloomfilter/ES85BloomFilterPostingsFormat.java b/server/src/main/java/org/elasticsearch/index/codec/bloomfilter/ES85BloomFilterPostingsFormat.java index 4b4a77f914f5..d26fb52a82bc 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/bloomfilter/ES85BloomFilterPostingsFormat.java +++ b/server/src/main/java/org/elasticsearch/index/codec/bloomfilter/ES85BloomFilterPostingsFormat.java @@ -22,11 +22,9 @@ import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.codecs.FieldsConsumer; import org.apache.lucene.codecs.FieldsProducer; -import org.apache.lucene.codecs.NormsProducer; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.index.BaseTermsEnum; import org.apache.lucene.index.FieldInfos; -import org.apache.lucene.index.Fields; import org.apache.lucene.index.FilterLeafReader; import org.apache.lucene.index.ImpactsEnum; import org.apache.lucene.index.IndexFileNames; @@ -45,10 +43,6 @@ import org.apache.lucene.util.AttributeSource; import org.apache.lucene.util.BitUtil; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.ByteArray; import org.elasticsearch.core.IOUtils; import java.io.Closeable; @@ -60,9 +54,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Set; -import java.util.function.Function; /** * This implementation is forked from Lucene's BloomFilterPosting to support on-disk bloom filters. 
@@ -77,26 +69,13 @@ public class ES85BloomFilterPostingsFormat extends PostingsFormat { static final String BLOOM_FILTER_META_FILE = "bfm"; static final String BLOOM_FILTER_INDEX_FILE = "bfi"; - private Function postingsFormats; - private BigArrays bigArrays; - - public ES85BloomFilterPostingsFormat(BigArrays bigArrays, Function postingsFormats) { - this(); - this.bigArrays = Objects.requireNonNull(bigArrays); - this.postingsFormats = Objects.requireNonNull(postingsFormats); - } - public ES85BloomFilterPostingsFormat() { super(BLOOM_CODEC_NAME); } @Override public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException { - if (postingsFormats == null || bigArrays == null) { - assert false : BLOOM_CODEC_NAME + " was initialized with a wrong constructor"; - throw new UnsupportedOperationException(BLOOM_CODEC_NAME + " was initialized with a wrong constructor"); - } - return new FieldsWriter(state); + throw new UnsupportedOperationException(); } @Override @@ -109,128 +88,15 @@ public String toString() { return BLOOM_CODEC_NAME; } - private static String metaFile(SegmentInfo si, String segmentSuffix) { + static String metaFile(SegmentInfo si, String segmentSuffix) { return IndexFileNames.segmentFileName(si.name, segmentSuffix, BLOOM_FILTER_META_FILE); } - private static String indexFile(SegmentInfo si, String segmentSuffix) { + static String indexFile(SegmentInfo si, String segmentSuffix) { return IndexFileNames.segmentFileName(si.name, segmentSuffix, BLOOM_FILTER_INDEX_FILE); } - final class FieldsWriter extends FieldsConsumer { - private final SegmentWriteState state; - private final IndexOutput indexOut; - private final List bloomFilters = new ArrayList<>(); - private final List fieldsGroups = new ArrayList<>(); - private final List toCloses = new ArrayList<>(); - private boolean closed; - - FieldsWriter(SegmentWriteState state) throws IOException { - this.state = state; - boolean success = false; - try { - indexOut = state.directory.createOutput(indexFile(state.segmentInfo, state.segmentSuffix), state.context); - toCloses.add(indexOut); - CodecUtil.writeIndexHeader(indexOut, BLOOM_CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); - success = true; - } finally { - if (success == false) { - IOUtils.closeWhileHandlingException(toCloses); - } - } - } - - @Override - public void write(Fields fields, NormsProducer norms) throws IOException { - writePostings(fields, norms); - writeBloomFilters(fields); - } - - private void writePostings(Fields fields, NormsProducer norms) throws IOException { - final Map currentGroups = new HashMap<>(); - for (String field : fields) { - final PostingsFormat postingsFormat = postingsFormats.apply(field); - if (postingsFormat == null) { - throw new IllegalStateException("PostingsFormat for field [" + field + "] wasn't specified"); - } - FieldsGroup group = currentGroups.get(postingsFormat); - if (group == null) { - group = new FieldsGroup(postingsFormat, Integer.toString(fieldsGroups.size()), new ArrayList<>()); - currentGroups.put(postingsFormat, group); - fieldsGroups.add(group); - } - group.fields.add(field); - } - for (FieldsGroup group : currentGroups.values()) { - final FieldsConsumer writer = group.postingsFormat.fieldsConsumer(new SegmentWriteState(state, group.suffix)); - toCloses.add(writer); - final Fields maskedFields = new FilterLeafReader.FilterFields(fields) { - @Override - public Iterator iterator() { - return group.fields.iterator(); - } - }; - writer.write(maskedFields, norms); - } - } - - private void 
writeBloomFilters(Fields fields) throws IOException { - for (String field : fields) { - final Terms terms = fields.terms(field); - if (terms == null) { - continue; - } - final int bloomFilterSize = bloomFilterSize(state.segmentInfo.maxDoc()); - final int numBytes = numBytesForBloomFilter(bloomFilterSize); - try (ByteArray buffer = bigArrays.newByteArray(numBytes)) { - final TermsEnum termsEnum = terms.iterator(); - while (true) { - final BytesRef term = termsEnum.next(); - if (term == null) { - break; - } - final int hash = hashTerm(term) % bloomFilterSize; - final int pos = hash >> 3; - final int mask = 1 << (hash & 0x7); - final byte val = (byte) (buffer.get(pos) | mask); - buffer.set(pos, val); - } - bloomFilters.add(new BloomFilter(field, indexOut.getFilePointer(), bloomFilterSize)); - final BytesReference bytes = BytesReference.fromByteArray(buffer, numBytes); - bytes.writeTo(new IndexOutputOutputStream(indexOut)); - } - } - } - - @Override - public void close() throws IOException { - if (closed) { - return; - } - closed = true; - try { - CodecUtil.writeFooter(indexOut); - } finally { - IOUtils.close(toCloses); - } - try (IndexOutput metaOut = state.directory.createOutput(metaFile(state.segmentInfo, state.segmentSuffix), state.context)) { - CodecUtil.writeIndexHeader(metaOut, BLOOM_CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); - // write postings formats - metaOut.writeVInt(fieldsGroups.size()); - for (FieldsGroup group : fieldsGroups) { - group.writeTo(metaOut, state.fieldInfos); - } - // Write bloom filters - metaOut.writeVInt(bloomFilters.size()); - for (BloomFilter bloomFilter : bloomFilters) { - bloomFilter.writeTo(metaOut, state.fieldInfos); - } - CodecUtil.writeFooter(metaOut); - } - } - } - - private record BloomFilter(String field, long startFilePointer, int bloomFilterSize) { + record BloomFilter(String field, long startFilePointer, int bloomFilterSize) { void writeTo(IndexOutput out, FieldInfos fieldInfos) throws IOException { out.writeVInt(fieldInfos.fieldInfo(field).number); out.writeVLong(startFilePointer); @@ -245,7 +111,7 @@ static BloomFilter readFrom(IndexInput in, FieldInfos fieldInfos) throws IOExcep } } - private record FieldsGroup(PostingsFormat postingsFormat, String suffix, List fields) { + record FieldsGroup(PostingsFormat postingsFormat, String suffix, List fields) { void writeTo(IndexOutput out, FieldInfos fieldInfos) throws IOException { out.writeString(postingsFormat.getName()); out.writeString(suffix); diff --git a/server/src/main/java/org/elasticsearch/index/codec/bloomfilter/ES87BloomFilterPostingsFormat.java b/server/src/main/java/org/elasticsearch/index/codec/bloomfilter/ES87BloomFilterPostingsFormat.java new file mode 100644 index 000000000000..191fe8f75b2f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/bloomfilter/ES87BloomFilterPostingsFormat.java @@ -0,0 +1,774 @@ +/* + * @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2023 Elasticsearch B.V. + */ +package org.elasticsearch.index.codec.bloomfilter; + +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.codecs.FieldsConsumer; +import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.NormsProducer; +import org.apache.lucene.codecs.PostingsFormat; +import org.apache.lucene.index.BaseTermsEnum; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.Fields; +import org.apache.lucene.index.FilterLeafReader; +import org.apache.lucene.index.ImpactsEnum; +import org.apache.lucene.index.IndexFileNames; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.SegmentInfo; +import org.apache.lucene.index.SegmentReadState; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.index.TermState; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.store.ChecksumIndexInput; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.RandomAccessInput; +import org.apache.lucene.util.AttributeSource; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.ByteArray; +import org.elasticsearch.common.util.ByteUtils; +import org.elasticsearch.core.IOUtils; + +import java.io.Closeable; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.function.Function; + +/** + * This implementation is forked from Lucene's BloomFilterPosting to support on-disk bloom filters. + *
    + * A {@link PostingsFormat} useful for low doc-frequency fields such as primary keys. Bloom filters + * offers "fast-fail" for reads in segments known to have no record of the key. + */ +public class ES87BloomFilterPostingsFormat extends PostingsFormat { + static final String BLOOM_CODEC_NAME = "ES87BloomFilter"; + static final int VERSION_START = 0; + static final int VERSION_CURRENT = VERSION_START; + static final String BLOOM_FILTER_META_FILE = "bfm"; + static final String BLOOM_FILTER_INDEX_FILE = "bfi"; + /** Bloom filters target 10 bits per entry, which, along with 7 hash functions, yields about 1% false positives. */ + private static final int BITS_PER_ENTRY = 10; + /** The optimal number of hash functions for a bloom filter is approximately 0.7 times the number of bits per entry. */ + private static final int NUM_HASH_FUNCTIONS = 7; + + private Function postingsFormats; + private BigArrays bigArrays; + + public ES87BloomFilterPostingsFormat(BigArrays bigArrays, Function postingsFormats) { + this(); + this.bigArrays = Objects.requireNonNull(bigArrays); + this.postingsFormats = Objects.requireNonNull(postingsFormats); + } + + public ES87BloomFilterPostingsFormat() { + super(BLOOM_CODEC_NAME); + } + + @Override + public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException { + if (postingsFormats == null || bigArrays == null) { + assert false : BLOOM_CODEC_NAME + " was initialized with a wrong constructor"; + throw new UnsupportedOperationException(BLOOM_CODEC_NAME + " was initialized with a wrong constructor"); + } + return new FieldsWriter(state); + } + + @Override + public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { + return new FieldsReader(state); + } + + @Override + public String toString() { + return BLOOM_CODEC_NAME; + } + + private static String metaFile(SegmentInfo si, String segmentSuffix) { + return IndexFileNames.segmentFileName(si.name, segmentSuffix, BLOOM_FILTER_META_FILE); + } + + private static String indexFile(SegmentInfo si, String segmentSuffix) { + return IndexFileNames.segmentFileName(si.name, segmentSuffix, BLOOM_FILTER_INDEX_FILE); + } + + final class FieldsWriter extends FieldsConsumer { + private final SegmentWriteState state; + private final IndexOutput indexOut; + private final List bloomFilters = new ArrayList<>(); + private final List fieldsGroups = new ArrayList<>(); + private final List toCloses = new ArrayList<>(); + private boolean closed; + private final int[] hashes = new int[NUM_HASH_FUNCTIONS]; + + FieldsWriter(SegmentWriteState state) throws IOException { + this.state = state; + boolean success = false; + try { + indexOut = state.directory.createOutput(indexFile(state.segmentInfo, state.segmentSuffix), state.context); + toCloses.add(indexOut); + CodecUtil.writeIndexHeader(indexOut, BLOOM_CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); + success = true; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(toCloses); + } + } + } + + @Override + public void write(Fields fields, NormsProducer norms) throws IOException { + writePostings(fields, norms); + writeBloomFilters(fields); + } + + private void writePostings(Fields fields, NormsProducer norms) throws IOException { + final Map currentGroups = new HashMap<>(); + for (String field : fields) { + final PostingsFormat postingsFormat = postingsFormats.apply(field); + if (postingsFormat == null) { + throw new IllegalStateException("PostingsFormat for field [" + field + "] wasn't specified"); 
+ } + FieldsGroup group = currentGroups.get(postingsFormat); + if (group == null) { + group = new FieldsGroup(postingsFormat, Integer.toString(fieldsGroups.size()), new ArrayList<>()); + currentGroups.put(postingsFormat, group); + fieldsGroups.add(group); + } + group.fields.add(field); + } + for (FieldsGroup group : currentGroups.values()) { + final FieldsConsumer writer = group.postingsFormat.fieldsConsumer(new SegmentWriteState(state, group.suffix)); + toCloses.add(writer); + final Fields maskedFields = new FilterLeafReader.FilterFields(fields) { + @Override + public Iterator iterator() { + return group.fields.iterator(); + } + }; + writer.write(maskedFields, norms); + } + } + + private void writeBloomFilters(Fields fields) throws IOException { + for (String field : fields) { + final Terms terms = fields.terms(field); + if (terms == null) { + continue; + } + final int bloomFilterSize = bloomFilterSize(state.segmentInfo.maxDoc()); + final int numBytes = numBytesForBloomFilter(bloomFilterSize); + try (ByteArray buffer = bigArrays.newByteArray(numBytes)) { + final TermsEnum termsEnum = terms.iterator(); + while (true) { + final BytesRef term = termsEnum.next(); + if (term == null) { + break; + } + + hashTerm(term, hashes); + for (int hash : hashes) { + hash = hash % bloomFilterSize; + final int pos = hash >> 3; + final int mask = 1 << (hash & 7); + final byte val = (byte) (buffer.get(pos) | mask); + buffer.set(pos, val); + } + } + bloomFilters.add(new BloomFilter(field, indexOut.getFilePointer(), bloomFilterSize)); + final BytesReference bytes = BytesReference.fromByteArray(buffer, numBytes); + bytes.writeTo(new IndexOutputOutputStream(indexOut)); + } + } + } + + @Override + public void close() throws IOException { + if (closed) { + return; + } + final long indexFileLength; + closed = true; + try { + CodecUtil.writeFooter(indexOut); + indexFileLength = indexOut.getFilePointer(); + } finally { + IOUtils.close(toCloses); + } + try (IndexOutput metaOut = state.directory.createOutput(metaFile(state.segmentInfo, state.segmentSuffix), state.context)) { + CodecUtil.writeIndexHeader(metaOut, BLOOM_CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); + // write postings formats + metaOut.writeVInt(fieldsGroups.size()); + for (FieldsGroup group : fieldsGroups) { + group.writeTo(metaOut, state.fieldInfos); + } + // Write bloom filters metadata + metaOut.writeVInt(bloomFilters.size()); + for (BloomFilter bloomFilter : bloomFilters) { + bloomFilter.writeTo(metaOut, state.fieldInfos); + } + metaOut.writeVLong(indexFileLength); + CodecUtil.writeFooter(metaOut); + } + } + } + + private record BloomFilter(String field, long startFilePointer, int bloomFilterSize) { + void writeTo(IndexOutput out, FieldInfos fieldInfos) throws IOException { + out.writeVInt(fieldInfos.fieldInfo(field).number); + out.writeVLong(startFilePointer); + out.writeVInt(bloomFilterSize); + } + + static BloomFilter readFrom(IndexInput in, FieldInfos fieldInfos) throws IOException { + final String fieldName = fieldInfos.fieldInfo(in.readVInt()).name; + final long startFilePointer = in.readVLong(); + final int bloomFilterSize = in.readVInt(); + return new BloomFilter(fieldName, startFilePointer, bloomFilterSize); + } + } + + private record FieldsGroup(PostingsFormat postingsFormat, String suffix, List fields) { + void writeTo(IndexOutput out, FieldInfos fieldInfos) throws IOException { + out.writeString(postingsFormat.getName()); + out.writeString(suffix); + out.writeVInt(fields.size()); + for (String field : 
fields) { + out.writeVInt(fieldInfos.fieldInfo(field).number); + } + } + + static FieldsGroup readFrom(IndexInput in, FieldInfos fieldInfos) throws IOException { + final PostingsFormat postingsFormat = forName(in.readString()); + final String suffix = in.readString(); + final int numFields = in.readVInt(); + final List fields = new ArrayList<>(); + for (int i = 0; i < numFields; i++) { + fields.add(fieldInfos.fieldInfo(in.readVInt()).name); + } + return new FieldsGroup(postingsFormat, suffix, fields); + } + } + + static final class FieldsReader extends FieldsProducer { + private final Map bloomFilters; + private final List toCloses = new ArrayList<>(); + private final Map readerMap = new HashMap<>(); + private final IndexInput indexIn; + + FieldsReader(SegmentReadState state) throws IOException { + boolean success = false; + try ( + ChecksumIndexInput metaIn = state.directory.openChecksumInput( + metaFile(state.segmentInfo, state.segmentSuffix), + IOContext.READONCE + ) + ) { + Map bloomFilters = null; + Throwable priorE = null; + long indexFileLength = 0; + try { + CodecUtil.checkIndexHeader( + metaIn, + BLOOM_CODEC_NAME, + VERSION_START, + VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix + ); + // read postings formats + final int numFieldsGroups = metaIn.readVInt(); + for (int i = 0; i < numFieldsGroups; i++) { + final FieldsGroup group = FieldsGroup.readFrom(metaIn, state.fieldInfos); + final FieldsProducer reader = group.postingsFormat.fieldsProducer(new SegmentReadState(state, group.suffix)); + toCloses.add(reader); + for (String field : group.fields) { + readerMap.put(field, reader); + } + } + // read bloom filters + final int numBloomFilters = metaIn.readVInt(); + bloomFilters = new HashMap<>(numBloomFilters); + for (int i = 0; i < numBloomFilters; i++) { + final BloomFilter bloomFilter = BloomFilter.readFrom(metaIn, state.fieldInfos); + bloomFilters.put(bloomFilter.field, bloomFilter); + } + + indexFileLength = metaIn.readVLong(); + } catch (Throwable t) { + priorE = t; + } finally { + CodecUtil.checkFooter(metaIn, priorE); + } + this.bloomFilters = bloomFilters; + indexIn = state.directory.openInput(indexFile(state.segmentInfo, state.segmentSuffix), state.context); + toCloses.add(indexIn); + CodecUtil.checkIndexHeader( + indexIn, + BLOOM_CODEC_NAME, + VERSION_START, + VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix + ); + CodecUtil.retrieveChecksum(indexIn, indexFileLength); + assert assertBloomFilterSizes(state.segmentInfo); + success = true; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(toCloses); + } + } + } + + private boolean assertBloomFilterSizes(SegmentInfo segmentInfo) { + for (BloomFilter bloomFilter : bloomFilters.values()) { + assert bloomFilter.bloomFilterSize == bloomFilterSize(segmentInfo.maxDoc()) + : "bloom_filter=" + bloomFilter + ", max_docs=" + segmentInfo.maxDoc(); + } + return true; + } + + @Override + public Iterator iterator() { + return readerMap.keySet().iterator(); + } + + @Override + public void close() throws IOException { + IOUtils.close(toCloses); + } + + @Override + public Terms terms(String field) throws IOException { + final FieldsProducer reader = readerMap.get(field); + if (reader == null) { + return null; + } + final Terms terms = reader.terms(field); + if (terms == null) { + return null; + } + final BloomFilter bloomFilter = bloomFilters.get(field); + if (bloomFilter != null) { + final RandomAccessInput data = indexIn.randomAccessSlice( + bloomFilter.startFilePointer(), + 
numBytesForBloomFilter(bloomFilter.bloomFilterSize) + ); + return new BloomFilterTerms(terms, data, bloomFilter.bloomFilterSize); + } else { + return terms; + } + } + + @Override + public int size() { + return readerMap.size(); + } + + @Override + public void checkIntegrity() throws IOException { + // already fully checked the meta file; let's fully checked the index file. + CodecUtil.checksumEntireFile(indexIn); + // multiple fields can share the same reader + final Set seenReaders = new HashSet<>(); + for (FieldsProducer reader : readerMap.values()) { + if (seenReaders.add(reader)) { + reader.checkIntegrity(); + } + } + } + } + + private static class BloomFilterTerms extends FilterLeafReader.FilterTerms { + private final RandomAccessInput data; + private final int bloomFilterSize; + private final int[] hashes = new int[NUM_HASH_FUNCTIONS]; + + BloomFilterTerms(Terms in, RandomAccessInput data, int bloomFilterSize) { + super(in); + this.data = data; + this.bloomFilterSize = bloomFilterSize; + } + + private boolean mayContainTerm(BytesRef term) throws IOException { + hashTerm(term, hashes); + for (int hash : hashes) { + hash = hash % bloomFilterSize; + final int pos = hash >> 3; + final int mask = 1 << (hash & 7); + final byte bits = data.readByte(pos); + if ((bits & mask) == 0) { + return false; + } + } + return true; + } + + @Override + public TermsEnum iterator() throws IOException { + return new LazyFilterTermsEnum() { + private TermsEnum delegate; + + @Override + TermsEnum getDelegate() throws IOException { + if (delegate == null) { + delegate = in.iterator(); + } + return delegate; + } + + @Override + public boolean seekExact(BytesRef term) throws IOException { + if (mayContainTerm(term)) { + return getDelegate().seekExact(term); + } else { + return false; + } + } + + @Override + public void seekExact(BytesRef term, TermState state) throws IOException { + getDelegate().seekExact(term, state); + } + + @Override + public TermState termState() throws IOException { + // TODO: return TermState that includes BloomFilter and fix _disk_usage API + return getDelegate().termState(); + } + }; + } + } + + private abstract static class LazyFilterTermsEnum extends BaseTermsEnum { + abstract TermsEnum getDelegate() throws IOException; + + @Override + public SeekStatus seekCeil(BytesRef text) throws IOException { + return getDelegate().seekCeil(text); + } + + @Override + public void seekExact(long ord) throws IOException { + getDelegate().seekExact(ord); + } + + @Override + public BytesRef term() throws IOException { + return getDelegate().term(); + } + + @Override + public long ord() throws IOException { + return getDelegate().ord(); + } + + @Override + public int docFreq() throws IOException { + return getDelegate().docFreq(); + } + + @Override + public long totalTermFreq() throws IOException { + return getDelegate().totalTermFreq(); + } + + @Override + public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException { + return getDelegate().postings(reuse, flags); + } + + @Override + public ImpactsEnum impacts(int flags) throws IOException { + return getDelegate().impacts(flags); + } + + @Override + public BytesRef next() throws IOException { + return getDelegate().next(); + } + + @Override + public AttributeSource attributes() { + try { + return getDelegate().attributes(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + } + + static int bloomFilterSize(int maxDocs) { + if (maxDocs < 1) { + throw new IllegalStateException("maxDocs must be greater than or 
equal to 1, got " + maxDocs); + } + // 10% saturation (i.e., 10 bits for each term) + long numBits = maxDocs * (long) BITS_PER_ENTRY; + // Round to the next multiple of 8 since we can only store whole bytes + numBits = ((numBits - 1) | 0x07L) + 1; + if (numBits > Integer.MAX_VALUE) { + return Integer.MAX_VALUE; + } else { + return (int) numBits; + } + } + + static int numBytesForBloomFilter(int bloomFilterSize) { + return Math.toIntExact((bloomFilterSize + 7L) / 8L); + } + + // Uses MurmurHash3-128 to generate a 64-bit hash value, then picks 7 subsets of 31 bits each and returns the values in the + // outputs array. This provides us with 7 reasonably independent hashes of the data for the cost of one MurmurHash3 calculation. + static int[] hashTerm(BytesRef br, int[] outputs) { + final long hash64 = MurmurHash3.hash64(br.bytes, br.offset, br.length); + final int upperHalf = (int) (hash64 >> 32); + final int lowerHalf = (int) hash64; + // Derive 7 hash outputs by combining the two 64-bit halves, adding the upper half multiplied with different small constants + // without common gcd. + outputs[0] = (lowerHalf + 2 * upperHalf) & 0x7FFF_FFFF; + outputs[1] = (lowerHalf + 3 * upperHalf) & 0x7FFF_FFFF; + outputs[2] = (lowerHalf + 5 * upperHalf) & 0x7FFF_FFFF; + outputs[3] = (lowerHalf + 7 * upperHalf) & 0x7FFF_FFFF; + outputs[4] = (lowerHalf + 11 * upperHalf) & 0x7FFF_FFFF; + outputs[5] = (lowerHalf + 13 * upperHalf) & 0x7FFF_FFFF; + outputs[6] = (lowerHalf + 17 * upperHalf) & 0x7FFF_FFFF; + return outputs; + } + + // + // The following Murmur3 implementation is borrowed from commons-codec. + // + + /** + * Implementation of the MurmurHash3 128-bit hash functions. + * + *
    + * MurmurHash is a non-cryptographic hash function suitable for general hash-based lookup. The name comes from two basic + * operations, multiply (MU) and rotate (R), used in its inner loop. Unlike cryptographic hash functions, it is not + * specifically designed to be difficult to reverse by an adversary, making it unsuitable for cryptographic purposes. + *
    + * + *
    + * This contains a Java port of the 32-bit hash function {@code MurmurHash3_x86_32} and the 128-bit hash function + * {@code MurmurHash3_x64_128} from Austin Appleby's original {@code c++} code in SMHasher. + *
    + * + *
    + * This is public domain code with no copyrights. From home page of + * SMHasher: + *
    + * + *
    "All MurmurHash versions are public domain software, and the author disclaims all copyright to their + * code."
    + * + *
    + * Original adaption from Apache Hive. That adaption contains a {@code hash64} method that is not part of the original + * MurmurHash3 code. It is not recommended to use these methods. They will be removed in a future release. To obtain a + * 64-bit hash use half of the bits from the {@code hash128x64} methods using the input data converted to bytes. + *
    + * + * @see MurmurHash + * @see Original MurmurHash3 c++ + * code + * @see + * Apache Hive Murmer3 + * @since 1.13 + */ + public static final class MurmurHash3 { + /** + * A default seed to use for the murmur hash algorithm. + * Has the value {@code 104729}. + */ + public static final int DEFAULT_SEED = 104729; + + // Constants for 128-bit variant + private static final long C1 = 0x87c37b91114253d5L; + private static final long C2 = 0x4cf5ad432745937fL; + private static final int R1 = 31; + private static final int R2 = 27; + private static final int R3 = 33; + private static final int M = 5; + private static final int N1 = 0x52dce729; + private static final int N2 = 0x38495ab5; + + /** No instance methods. */ + private MurmurHash3() {} + + /** + * Generates 64-bit hash from the byte array with the given offset, length and seed by discarding the second value of the 128-bit + * hash. + * + * This version uses the default seed. + * + * @param data The input byte array + * @param offset The first element of array + * @param length The length of array + * @return The sum of the two 64-bit hashes that make up the hash128 + */ + public static long hash64(final byte[] data, final int offset, final int length) { + // We hope that the C2 escape analysis prevents ths allocation from creating GC pressure. + long[] hash128 = { 0, 0 }; + hash128x64Internal(data, offset, length, DEFAULT_SEED, hash128); + return hash128[0]; + } + + /** + * Generates 128-bit hash from the byte array with the given offset, length and seed. + * + *
+ * This is an implementation of the 128-bit hash function {@code MurmurHash3_x64_128} + * from Austin Appleby's original MurmurHash3 {@code c++} code in SMHasher.
    + * + * @param data The input byte array + * @param offset The first element of array + * @param length The length of array + * @param seed The initial seed value + * @return The 128-bit hash (2 longs) + */ + @SuppressWarnings("fallthrough") + private static long[] hash128x64Internal( + final byte[] data, + final int offset, + final int length, + final long seed, + final long[] result + ) { + long h1 = seed; + long h2 = seed; + final int nblocks = length >> 4; + + // body + for (int i = 0; i < nblocks; i++) { + final int index = offset + (i << 4); + long k1 = ByteUtils.readLongLE(data, index); + long k2 = ByteUtils.readLongLE(data, index + 8); + + // mix functions for k1 + k1 *= C1; + k1 = Long.rotateLeft(k1, R1); + k1 *= C2; + h1 ^= k1; + h1 = Long.rotateLeft(h1, R2); + h1 += h2; + h1 = h1 * M + N1; + + // mix functions for k2 + k2 *= C2; + k2 = Long.rotateLeft(k2, R3); + k2 *= C1; + h2 ^= k2; + h2 = Long.rotateLeft(h2, R1); + h2 += h1; + h2 = h2 * M + N2; + } + + // tail + long k1 = 0; + long k2 = 0; + final int index = offset + (nblocks << 4); + switch (offset + length - index) { + case 15: + k2 ^= ((long) data[index + 14] & 0xff) << 48; + case 14: + k2 ^= ((long) data[index + 13] & 0xff) << 40; + case 13: + k2 ^= ((long) data[index + 12] & 0xff) << 32; + case 12: + k2 ^= ((long) data[index + 11] & 0xff) << 24; + case 11: + k2 ^= ((long) data[index + 10] & 0xff) << 16; + case 10: + k2 ^= ((long) data[index + 9] & 0xff) << 8; + case 9: + k2 ^= data[index + 8] & 0xff; + k2 *= C2; + k2 = Long.rotateLeft(k2, R3); + k2 *= C1; + h2 ^= k2; + + case 8: + k1 ^= ((long) data[index + 7] & 0xff) << 56; + case 7: + k1 ^= ((long) data[index + 6] & 0xff) << 48; + case 6: + k1 ^= ((long) data[index + 5] & 0xff) << 40; + case 5: + k1 ^= ((long) data[index + 4] & 0xff) << 32; + case 4: + k1 ^= ((long) data[index + 3] & 0xff) << 24; + case 3: + k1 ^= ((long) data[index + 2] & 0xff) << 16; + case 2: + k1 ^= ((long) data[index + 1] & 0xff) << 8; + case 1: + k1 ^= data[index] & 0xff; + k1 *= C1; + k1 = Long.rotateLeft(k1, R1); + k1 *= C2; + h1 ^= k1; + } + + // finalization + h1 ^= length; + h2 ^= length; + + h1 += h2; + h2 += h1; + + h1 = fmix64(h1); + h2 = fmix64(h2); + + h1 += h2; + h2 += h1; + + result[0] = h1; + result[1] = h2; + return result; + } + + /** + * Performs the final avalanche mix step of the 64-bit hash function {@code MurmurHash3_x64_128}. + * + * @param hash The current hash + * @return The final hash + */ + private static long fmix64(long hash) { + hash ^= (hash >>> 33); + hash *= 0xff51afd7ed558ccdL; + hash ^= (hash >>> 33); + hash *= 0xc4ceb9fe1a85ec53L; + hash ^= (hash >>> 33); + return hash; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/codec/tsdb/DocValuesForUtil.java b/server/src/main/java/org/elasticsearch/index/codec/tsdb/DocValuesForUtil.java new file mode 100644 index 000000000000..23ad4810d853 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/tsdb/DocValuesForUtil.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.codec.tsdb; + +import org.apache.lucene.store.DataInput; +import org.apache.lucene.store.DataOutput; + +import java.io.IOException; + +public class DocValuesForUtil { + private final ForUtil forUtil = new ForUtil(); + private final int blockSize; + + public DocValuesForUtil() { + this(ES87TSDBDocValuesFormat.DEFAULT_NUMERIC_BLOCK_SIZE); + } + + public DocValuesForUtil(int blockSize) { + this.blockSize = blockSize; + } + + void encode(long[] in, int bitsPerValue, DataOutput out) throws IOException { + if (bitsPerValue <= 24) { // these bpvs are handled efficiently by ForUtil + forUtil.encode(in, bitsPerValue, out); + } else if (bitsPerValue <= 32) { + collapse32(in); + for (int i = 0; i < blockSize / 2; ++i) { + out.writeLong(in[i]); + } + } else { + for (long l : in) { + out.writeLong(l); + } + } + } + + void decode(int bitsPerValue, DataInput in, long[] out) throws IOException { + if (bitsPerValue <= 24) { + forUtil.decode(bitsPerValue, in, out); + } else if (bitsPerValue <= 32) { + in.readLongs(out, 0, blockSize / 2); + expand32(out); + } else { + in.readLongs(out, 0, blockSize); + } + } + + private static void collapse32(long[] arr) { + for (int i = 0; i < 64; ++i) { + arr[i] = (arr[i] << 32) | arr[64 + i]; + } + } + + private static void expand32(long[] arr) { + for (int i = 0; i < 64; ++i) { + long l = arr[i]; + arr[i] = l >>> 32; + arr[64 + i] = l & 0xFFFFFFFFL; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesConsumer.java b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesConsumer.java new file mode 100644 index 000000000000..dfc35fc8ab38 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesConsumer.java @@ -0,0 +1,246 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
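To illustrate the DocValuesForUtil packing above (a sketch, not from this patch): for 25 to 32 bits per value, collapse32 packs value i and value i+64 of a 128-value block into a single long, so only 64 longs are written, and expand32 reverses it on read:

    long[] block = new long[128];
    for (int i = 0; i < 128; i++) {
        block[i] = i;                                  // every value fits in 32 bits
    }
    long packed0 = (block[0] << 32) | block[64];       // collapse: values 0 and 64 share one long
    long hi = packed0 >>> 32;                          // expand: 0  -> back to block[0]
    long lo = packed0 & 0xFFFFFFFFL;                   //         64 -> back to block[64]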
+ */ + +package org.elasticsearch.index.codec.tsdb; + +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.codecs.DocValuesConsumer; +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.codecs.lucene90.IndexedDISI; +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.EmptyDocValuesProducer; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.IndexFileNames; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.store.ByteBuffersDataOutput; +import org.apache.lucene.store.ByteBuffersIndexOutput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.util.packed.DirectMonotonicWriter; +import org.elasticsearch.core.IOUtils; + +import java.io.IOException; +import java.util.Arrays; + +final class ES87TSDBDocValuesConsumer extends DocValuesConsumer { + + IndexOutput data, meta; + final int maxDoc; + private final int numericBlockShift; + private final int numericBlockSize; + private final int directMonotonicBlockShift; + + ES87TSDBDocValuesConsumer( + SegmentWriteState state, + String dataCodec, + String dataExtension, + String metaCodec, + String metaExtension, + int numericBlockShift, + int numericBlockSize, + int directMonotonicBlockShift + ) throws IOException { + boolean success = false; + this.numericBlockShift = numericBlockShift; + this.numericBlockSize = numericBlockSize; + this.directMonotonicBlockShift = directMonotonicBlockShift; + try { + final String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension); + data = state.directory.createOutput(dataName, state.context); + CodecUtil.writeIndexHeader( + data, + dataCodec, + ES87TSDBDocValuesFormat.VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix + ); + String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension); + meta = state.directory.createOutput(metaName, state.context); + CodecUtil.writeIndexHeader( + meta, + metaCodec, + ES87TSDBDocValuesFormat.VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix + ); + maxDoc = state.segmentInfo.maxDoc(); + success = true; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(this); + } + } + } + + @Override + public void addNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + meta.writeInt(field.number); + meta.writeByte(ES87TSDBDocValuesFormat.NUMERIC); + writeNumericField(field, new EmptyDocValuesProducer() { + @Override + public SortedNumericDocValues getSortedNumeric(FieldInfo field) throws IOException { + return DocValues.singleton(valuesProducer.getNumeric(field)); + } + }); + } + + private long[] writeNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + int numDocsWithValue = 0; + long numValues = 0; + + SortedNumericDocValues values = valuesProducer.getSortedNumeric(field); + for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) { + numDocsWithValue++; + final int count = values.docValueCount(); + numValues += count; + } + + if (numDocsWithValue == 0) { // meta[-2, 0]: No documents with values + meta.writeLong(-2); // docsWithFieldOffset + meta.writeLong(0L); // docsWithFieldLength + meta.writeShort((short) -1); // jumpTableEntryCount + meta.writeByte((byte) -1); // denseRankPower + } else if 
(numDocsWithValue == maxDoc) { // meta[-1, 0]: All documents have values + meta.writeLong(-1); // docsWithFieldOffset + meta.writeLong(0L); // docsWithFieldLength + meta.writeShort((short) -1); // jumpTableEntryCount + meta.writeByte((byte) -1); // denseRankPower + } else { // meta[data.offset, data.length]: IndexedDISI structure for documents with values + long offset = data.getFilePointer(); + meta.writeLong(offset); // docsWithFieldOffset + values = valuesProducer.getSortedNumeric(field); + final short jumpTableEntryCount = IndexedDISI.writeBitSet(values, data, IndexedDISI.DEFAULT_DENSE_RANK_POWER); + meta.writeLong(data.getFilePointer() - offset); // docsWithFieldLength + meta.writeShort(jumpTableEntryCount); + meta.writeByte(IndexedDISI.DEFAULT_DENSE_RANK_POWER); + } + meta.writeLong(numValues); + + if (numValues > 0) { + meta.writeInt(directMonotonicBlockShift); + final ByteBuffersDataOutput indexOut = new ByteBuffersDataOutput(); + final DirectMonotonicWriter indexWriter = DirectMonotonicWriter.getInstance( + meta, + new ByteBuffersIndexOutput(indexOut, "temp-dv-index", "temp-dv-index"), + 1L + ((numValues - 1) >>> numericBlockShift), + directMonotonicBlockShift + ); + + final long[] buffer = new long[numericBlockSize]; + int bufferSize = 0; + final long valuesDataOffset = data.getFilePointer(); + final ES87TSDBDocValuesEncoder encoder = new ES87TSDBDocValuesEncoder(numericBlockSize); + + values = valuesProducer.getSortedNumeric(field); + for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) { + final int count = values.docValueCount(); + for (int i = 0; i < count; ++i) { + buffer[bufferSize++] = values.nextValue(); + if (bufferSize == numericBlockSize) { + indexWriter.add(data.getFilePointer() - valuesDataOffset); + encoder.encode(buffer, data); + bufferSize = 0; + } + } + } + if (bufferSize > 0) { + indexWriter.add(data.getFilePointer() - valuesDataOffset); + // Fill unused slots in the block with zeroes rather than junk + Arrays.fill(buffer, bufferSize, numericBlockSize, 0L); + encoder.encode(buffer, data); + } + + final long valuesDataLength = data.getFilePointer() - valuesDataOffset; + indexWriter.finish(); + final long indexDataOffset = data.getFilePointer(); + data.copyBytes(indexOut.toDataInput(), indexOut.size()); + meta.writeLong(indexDataOffset); + meta.writeLong(data.getFilePointer() - indexDataOffset); + + meta.writeLong(valuesDataOffset); + meta.writeLong(valuesDataLength); + } + + return new long[] { numDocsWithValue, numValues }; + } + + @Override + public void addBinaryField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + throw new UnsupportedOperationException("Unsupported binary doc values for field [" + field.name + "]"); + } + + @Override + public void addSortedField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + throw new UnsupportedOperationException("Unsupported sorted doc values for field [" + field.name + "]"); + } + + @Override + public void addSortedNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + meta.writeInt(field.number); + meta.writeByte(ES87TSDBDocValuesFormat.SORTED_NUMERIC); + writeSortedNumericField(field, valuesProducer); + } + + private void writeSortedNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + long[] stats = writeNumericField(field, valuesProducer); + int numDocsWithField = Math.toIntExact(stats[0]); + long numValues = stats[1]; + assert numValues >= numDocsWithField; + 
+ meta.writeInt(numDocsWithField); + if (numValues > numDocsWithField) { + long start = data.getFilePointer(); + meta.writeLong(start); + meta.writeVInt(directMonotonicBlockShift); + + final DirectMonotonicWriter addressesWriter = DirectMonotonicWriter.getInstance( + meta, + data, + numDocsWithField + 1L, + directMonotonicBlockShift + ); + long addr = 0; + addressesWriter.add(addr); + SortedNumericDocValues values = valuesProducer.getSortedNumeric(field); + for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) { + addr += values.docValueCount(); + addressesWriter.add(addr); + } + addressesWriter.finish(); + meta.writeLong(data.getFilePointer() - start); + } + } + + @Override + public void addSortedSetField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { + throw new UnsupportedOperationException("Unsupported sorted set doc values for field [" + field.name + "]"); + } + + @Override + public void close() throws IOException { + boolean success = false; + try { + if (meta != null) { + meta.writeInt(-1); // write EOF marker + CodecUtil.writeFooter(meta); // write checksum + } + if (data != null) { + CodecUtil.writeFooter(data); // write checksum + } + success = true; + } finally { + if (success) { + IOUtils.close(data, meta); + } else { + IOUtils.closeWhileHandlingException(data, meta); + } + meta = data = null; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesEncoder.java b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesEncoder.java new file mode 100644 index 000000000000..5bae4857a5c8 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesEncoder.java @@ -0,0 +1,212 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.codec.tsdb; + +import org.apache.lucene.store.DataInput; +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.util.MathUtil; +import org.apache.lucene.util.packed.PackedInts; + +import java.io.IOException; +import java.util.Arrays; + +public class ES87TSDBDocValuesEncoder { + private final DocValuesForUtil forUtil; + private final int blockSize; + + public ES87TSDBDocValuesEncoder() { + this(ES87TSDBDocValuesFormat.DEFAULT_NUMERIC_BLOCK_SIZE); + } + + public ES87TSDBDocValuesEncoder(int blockSize) { + this.blockSize = blockSize; + this.forUtil = new DocValuesForUtil(blockSize); + } + + public int getBlockSize() { + return blockSize; + } + + /** + * Delta-encode monotonic fields. This is typically helpful with near-primary sort fields or + * SORTED_NUMERIC/SORTED_SET doc values with many values per document. 
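A hand-worked sketch of the delta and GCD steps described above (illustrative only, not from this patch; the real encoder operates on 128-value blocks and stores the first value's offset separately):

    import java.math.BigInteger;
    import java.util.Arrays;

    public class TsdbEncodeSketch {
        public static void main(String[] args) {
            // a small monotonic block, e.g. millisecond timestamps sampled once per second
            long[] deltas = { 1_000_000L, 1_001_000L, 1_002_000L, 1_003_000L };
            for (int i = deltas.length - 1; i > 0; i--) {
                deltas[i] -= deltas[i - 1];            // delta step: trailing gaps become 1_000
            }
            long gcd = 0;
            for (int i = 1; i < deltas.length; i++) {  // gcd of the gaps
                gcd = BigInteger.valueOf(gcd).gcd(BigInteger.valueOf(deltas[i])).longValue();
            }
            System.out.println(Arrays.toString(deltas) + " gcd=" + gcd);
            // dividing the gaps by the gcd leaves 1s, which bit-pack at one bit per value
        }
    }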
+ */ + private void deltaEncode(int token, int tokenBits, long[] in, DataOutput out) throws IOException { + int gts = 0; + int lts = 0; + for (int i = 1; i < blockSize; ++i) { + if (in[i] > in[i - 1]) { + gts++; + } else if (in[i] < in[i - 1]) { + lts++; + } + } + + final boolean doDeltaCompression = (gts == 0 && lts >= 2) || (lts == 0 && gts >= 2); + long first = 0; + if (doDeltaCompression) { + for (int i = blockSize - 1; i > 0; --i) { + in[i] -= in[i - 1]; + } + // Avoid setting in[0] to 0 in case there is a minimum interval between + // consecutive values. This might later help compress data using fewer + // bits per value. + first = in[0] - in[1]; + in[0] = in[1]; + token = (token << 1) | 0x01; + } else { + token <<= 1; + } + removeOffset(token, tokenBits + 1, in, out); + if (doDeltaCompression) { + out.writeZLong(first); + } + } + + private void removeOffset(int token, int tokenBits, long[] in, DataOutput out) throws IOException { + long min = Long.MAX_VALUE; + long max = Long.MIN_VALUE; + for (long l : in) { + min = Math.min(l, min); + max = Math.max(l, max); + } + + if (max - min < 0) { + // overflow + min = 0; + } else if (min > 0 && min < (max >>> 2)) { + // removing the offset is unlikely going to help save bits per value, yet it makes decoding + // slower + min = 0; + } + + if (min != 0) { + for (int i = 0; i < blockSize; ++i) { + in[i] -= min; + } + token = (token << 1) | 0x01; + } else { + token <<= 1; + } + + gcdEncode(token, tokenBits + 1, in, out); + if (min != 0) { + out.writeZLong(min); + } + } + + /** + * See if numbers have a common divisor. This is typically helpful for integer values in + * floats/doubles or dates that don't have millisecond accuracy. + */ + private void gcdEncode(int token, int tokenBits, long[] in, DataOutput out) throws IOException { + long gcd = 0; + for (long l : in) { + gcd = MathUtil.gcd(gcd, l); + if (gcd == 1) { + break; + } + } + final boolean doGcdCompression = Long.compareUnsigned(gcd, 1) > 0; + if (doGcdCompression) { + for (int i = 0; i < blockSize; ++i) { + in[i] /= gcd; + } + token = (token << 1) | 0x01; + } else { + token <<= 1; + } + + forEncode(token, tokenBits + 1, in, out); + if (doGcdCompression) { + out.writeVLong(gcd - 2); + } + } + + private void forEncode(int token, int tokenBits, long[] in, DataOutput out) throws IOException { + long or = 0; + for (long l : in) { + or |= l; + } + + final int bitsPerValue = or == 0 ? 0 : PackedInts.unsignedBitsRequired(or); + + out.writeVInt((bitsPerValue << tokenBits) | token); + if (bitsPerValue > 0) { + forUtil.encode(in, bitsPerValue, out); + } + } + + /** + * Encode the given longs using a combination of delta-coding, GCD factorization and bit packing. + */ + void encode(long[] in, DataOutput out) throws IOException { + assert in.length == blockSize; + + deltaEncode(0, 0, in, out); + } + + /** Decode longs that have been encoded with {@link #encode}. 
*/ + void decode(DataInput in, long[] out) throws IOException { + assert out.length == blockSize : out.length; + + final int token = in.readVInt(); + final int bitsPerValue = token >>> 3; + + if (bitsPerValue != 0) { + forUtil.decode(bitsPerValue, in, out); + } else { + Arrays.fill(out, 0L); + } + + // simple blocks that only perform bit packing exit early here + // this is typical for SORTED(_SET) ordinals + if ((token & 0x07) != 0) { + + final boolean doGcdCompression = (token & 0x01) != 0; + if (doGcdCompression) { + final long gcd = 2 + in.readVLong(); + mul(out, gcd); + } + + final boolean hasOffset = (token & 0x02) != 0; + if (hasOffset) { + final long min = in.readZLong(); + add(out, min); + } + + final boolean doDeltaCompression = (token & 0x04) != 0; + if (doDeltaCompression) { + final long first = in.readZLong(); + out[0] += first; + deltaDecode(out); + } + } + } + + // this loop should auto-vectorize + private void mul(long[] arr, long m) { + for (int i = 0; i < blockSize; ++i) { + arr[i] *= m; + } + } + + // this loop should auto-vectorize + private void add(long[] arr, long min) { + for (int i = 0; i < blockSize; ++i) { + arr[i] += min; + } + } + + private void deltaDecode(long[] arr) { + for (int i = 1; i < blockSize; ++i) { + arr[i] += arr[i - 1]; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesFormat.java b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesFormat.java new file mode 100644 index 000000000000..77e98489f79f --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesFormat.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
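To make the token layout read by decode concrete (illustrative arithmetic, not from this patch): bit 2 marks delta-coding, bit 1 offset removal, bit 0 GCD division, and the higher bits carry bitsPerValue. For a block that was delta-coded and GCD-divided but not offset-shifted, packed at 4 bits per value:

    int bitsPerValue = 4;
    int token = (bitsPerValue << 3) | 0b100 | 0b001;   // delta bit + gcd bit -> 37
    assert (token >>> 3) == 4;                         // decode recovers bitsPerValue
    assert (token & 0x04) != 0 && (token & 0x01) != 0 && (token & 0x02) == 0;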
+ */ + +package org.elasticsearch.index.codec.tsdb; + +import org.apache.lucene.codecs.DocValuesConsumer; +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.index.SegmentReadState; +import org.apache.lucene.index.SegmentWriteState; + +import java.io.IOException; + +public class ES87TSDBDocValuesFormat extends org.apache.lucene.codecs.DocValuesFormat { + + static final int DEFAULT_NUMERIC_BLOCK_SHIFT = 7; + static final int DEFAULT_NUMERIC_BLOCK_SIZE = 1 << DEFAULT_NUMERIC_BLOCK_SHIFT; + static final int DEFAULT_DIRECT_MONOTONIC_BLOCK_SHIFT = 16; + static final String CODEC_NAME = "ES87TSDB"; + static final String DATA_CODEC = "ES87TSDBDocValuesData"; + static final String DATA_EXTENSION = "dvd"; + static final String META_CODEC = "ES87TSDBDocValuesMetadata"; + static final String META_EXTENSION = "dvm"; + static final int VERSION_START = 0; + static final int VERSION_CURRENT = VERSION_START; + static final byte NUMERIC = 0; + static final byte BINARY = 1; + static final byte SORTED = 2; + static final byte SORTED_SET = 3; + static final byte SORTED_NUMERIC = 4; + + private final int numericBlockShift; + private final int numericBlockSize; + private final int numericBlockMask; + private final int directMonotonicBlockShift; + + public ES87TSDBDocValuesFormat() { + this(DEFAULT_NUMERIC_BLOCK_SHIFT, DEFAULT_DIRECT_MONOTONIC_BLOCK_SHIFT); + } + + public ES87TSDBDocValuesFormat(int numericBlockShift, int directMonotonicBlockShift) { + super(CODEC_NAME); + this.numericBlockShift = numericBlockShift; + this.numericBlockSize = 1 << numericBlockShift; + this.numericBlockMask = numericBlockSize - 1; + this.directMonotonicBlockShift = directMonotonicBlockShift; + } + + @Override + public DocValuesConsumer fieldsConsumer(SegmentWriteState state) throws IOException { + return new ES87TSDBDocValuesConsumer( + state, + DATA_CODEC, + DATA_EXTENSION, + META_CODEC, + META_EXTENSION, + numericBlockShift, + numericBlockSize, + directMonotonicBlockShift + ); + } + + @Override + public DocValuesProducer fieldsProducer(SegmentReadState state) throws IOException { + return new ES87TSDBDocValuesProducer( + state, + DATA_CODEC, + DATA_EXTENSION, + META_CODEC, + META_EXTENSION, + numericBlockShift, + numericBlockSize, + numericBlockMask + ); + } + + public int getNumericBlockShift() { + return numericBlockShift; + } + + public int getNumericBlockSize() { + return numericBlockSize; + } + + public int getNumericBlockMask() { + return numericBlockMask; + } + + public int getDirectMonotonicBlockShift() { + return directMonotonicBlockShift; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java new file mode 100644 index 000000000000..5a6991850333 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java @@ -0,0 +1,527 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.codec.tsdb; + +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.codecs.lucene90.IndexedDISI; +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.IndexFileNames; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.SegmentReadState; +import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.store.ChecksumIndexInput; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.RandomAccessInput; +import org.apache.lucene.util.LongValues; +import org.apache.lucene.util.packed.DirectMonotonicReader; +import org.elasticsearch.core.IOUtils; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public class ES87TSDBDocValuesProducer extends DocValuesProducer { + private final Map numerics = new HashMap<>(); + private final Map sortedNumerics = new HashMap<>(); + private final IndexInput data; + private final int maxDoc; + private final int numericBlockShift; + private final int numericBlockSize; + private final int numericBlockMask; + + ES87TSDBDocValuesProducer( + SegmentReadState state, + String dataCodec, + String dataExtension, + String metaCodec, + String metaExtension, + int numericBlockShift, + int numericBlockSize, + int numericBlockMask + ) throws IOException { + this.numericBlockShift = numericBlockShift; + this.numericBlockSize = numericBlockSize; + this.numericBlockMask = numericBlockMask; + String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension); + this.maxDoc = state.segmentInfo.maxDoc(); + + // read in the entries from the metadata file. + int version = -1; + try (ChecksumIndexInput in = state.directory.openChecksumInput(metaName, state.context)) { + Throwable priorE = null; + + try { + version = CodecUtil.checkIndexHeader( + in, + metaCodec, + ES87TSDBDocValuesFormat.VERSION_START, + ES87TSDBDocValuesFormat.VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix + ); + + readFields(in, state.fieldInfos); + + } catch (Throwable exception) { + priorE = exception; + } finally { + CodecUtil.checkFooter(in, priorE); + } + } + + String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension); + this.data = state.directory.openInput(dataName, state.context); + boolean success = false; + try { + final int version2 = CodecUtil.checkIndexHeader( + data, + dataCodec, + ES87TSDBDocValuesFormat.VERSION_START, + ES87TSDBDocValuesFormat.VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix + ); + if (version != version2) { + throw new CorruptIndexException("Format versions mismatch: meta=" + version + ", data=" + version2, data); + } + + // NOTE: data file is too costly to verify checksum against all the bytes on open, + // but for now we at least verify proper structure of the checksum footer: which looks + // for FOOTER_MAGIC + algorithmID. This is cheap and can detect some forms of corruption + // such as file truncation. 
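+ // Full verification of the data file happens on demand in checkIntegrity().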
+ CodecUtil.retrieveChecksum(data); + + success = true; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(this.data); + } + } + } + + @Override + public NumericDocValues getNumeric(FieldInfo field) throws IOException { + NumericEntry entry = numerics.get(field.name); + return getNumeric(entry); + } + + @Override + public BinaryDocValues getBinary(FieldInfo field) throws IOException { + throw new UnsupportedOperationException("Unsupported binary doc values for field [" + field.name + "]"); + } + + @Override + public SortedDocValues getSorted(FieldInfo field) throws IOException { + throw new UnsupportedOperationException("Unsupported sorted doc values for field [" + field.name + "]"); + } + + @Override + public SortedNumericDocValues getSortedNumeric(FieldInfo field) throws IOException { + SortedNumericEntry entry = sortedNumerics.get(field.name); + return getSortedNumeric(entry); + } + + @Override + public SortedSetDocValues getSortedSet(FieldInfo field) throws IOException { + throw new UnsupportedOperationException("Unsupported sorted set doc values for field [" + field.name + "]"); + } + + @Override + public void checkIntegrity() throws IOException { + CodecUtil.checksumEntireFile(data); + } + + @Override + public void close() throws IOException { + data.close(); + } + + private void readFields(IndexInput meta, FieldInfos infos) throws IOException { + for (int fieldNumber = meta.readInt(); fieldNumber != -1; fieldNumber = meta.readInt()) { + FieldInfo info = infos.fieldInfo(fieldNumber); + if (info == null) { + throw new CorruptIndexException("Invalid field number: " + fieldNumber, meta); + } + byte type = meta.readByte(); + if (type == ES87TSDBDocValuesFormat.NUMERIC) { + numerics.put(info.name, readNumeric(meta)); + } else if (type == ES87TSDBDocValuesFormat.BINARY) { + throw new CorruptIndexException("unsupported type: " + type, meta); + } else if (type == ES87TSDBDocValuesFormat.SORTED) { + throw new CorruptIndexException("unsupported type: " + type, meta); + } else if (type == ES87TSDBDocValuesFormat.SORTED_SET) { + throw new CorruptIndexException("unsupported type: " + type, meta); + } else if (type == ES87TSDBDocValuesFormat.SORTED_NUMERIC) { + sortedNumerics.put(info.name, readSortedNumeric(meta)); + } else { + throw new CorruptIndexException("invalid type: " + type, meta); + } + } + } + + private NumericEntry readNumeric(IndexInput meta) throws IOException { + NumericEntry entry = new NumericEntry(); + readNumeric(meta, entry); + return entry; + } + + private void readNumeric(IndexInput meta, NumericEntry entry) throws IOException { + entry.docsWithFieldOffset = meta.readLong(); + entry.docsWithFieldLength = meta.readLong(); + entry.jumpTableEntryCount = meta.readShort(); + entry.denseRankPower = meta.readByte(); + entry.numValues = meta.readLong(); + if (entry.numValues > 0) { + final int indexBlockShift = meta.readInt(); + entry.indexMeta = DirectMonotonicReader.loadMeta(meta, 1 + ((entry.numValues - 1) >>> numericBlockShift), indexBlockShift); + entry.indexOffset = meta.readLong(); + entry.indexLength = meta.readLong(); + entry.valuesOffset = meta.readLong(); + entry.valuesLength = meta.readLong(); + } + } + + private SortedNumericEntry readSortedNumeric(IndexInput meta) throws IOException { + SortedNumericEntry entry = new SortedNumericEntry(); + readSortedNumeric(meta, entry); + return entry; + } + + private SortedNumericEntry readSortedNumeric(IndexInput meta, SortedNumericEntry entry) throws IOException { + readNumeric(meta, entry); + 
entry.numDocsWithField = meta.readInt(); + if (entry.numDocsWithField != entry.numValues) { + entry.addressesOffset = meta.readLong(); + final int blockShift = meta.readVInt(); + entry.addressesMeta = DirectMonotonicReader.loadMeta(meta, entry.numDocsWithField + 1, blockShift); + entry.addressesLength = meta.readLong(); + } + return entry; + } + + private abstract static class NumericValues { + abstract long advance(long index) throws IOException; + } + + private NumericDocValues getNumeric(NumericEntry entry) throws IOException { + if (entry.docsWithFieldOffset == -2) { + // empty + return DocValues.emptyNumeric(); + } + + // NOTE: we could make this a bit simpler by reusing #getValues but this + // makes things slower. + + final RandomAccessInput indexSlice = data.randomAccessSlice(entry.indexOffset, entry.indexLength); + final DirectMonotonicReader indexReader = DirectMonotonicReader.getInstance(entry.indexMeta, indexSlice); + final IndexInput valuesData = data.slice("values", entry.valuesOffset, entry.valuesLength); + + if (entry.docsWithFieldOffset == -1) { + // dense + return new NumericDocValues() { + + private final int maxDoc = ES87TSDBDocValuesProducer.this.maxDoc; + private int doc = -1; + private final ES87TSDBDocValuesEncoder decoder = new ES87TSDBDocValuesEncoder(numericBlockSize); + private long currentBlockIndex = -1; + private final long[] currentBlock = new long[numericBlockSize]; + + @Override + public int docID() { + return doc; + } + + @Override + public int nextDoc() throws IOException { + return advance(doc + 1); + } + + @Override + public int advance(int target) throws IOException { + if (target >= maxDoc) { + return doc = NO_MORE_DOCS; + } + return doc = target; + } + + @Override + public boolean advanceExact(int target) { + doc = target; + return true; + } + + @Override + public long cost() { + return maxDoc; + } + + @Override + public long longValue() throws IOException { + final int index = doc; + final int blockIndex = index >>> numericBlockShift; + final int blockInIndex = index & numericBlockMask; + if (blockIndex != currentBlockIndex) { + assert blockIndex > currentBlockIndex; + if (blockIndex - 1 > currentBlockIndex) { + valuesData.seek(indexReader.get(blockIndex)); + } + currentBlockIndex = blockIndex; + decoder.decode(valuesData, currentBlock); + } + return currentBlock[blockInIndex]; + } + }; + } else { + final IndexedDISI disi = new IndexedDISI( + data, + entry.docsWithFieldOffset, + entry.docsWithFieldLength, + entry.jumpTableEntryCount, + entry.denseRankPower, + entry.numValues + ); + return new NumericDocValues() { + + private final ES87TSDBDocValuesEncoder decoder = new ES87TSDBDocValuesEncoder(numericBlockSize); + private long currentBlockIndex = -1; + private final long[] currentBlock = new long[numericBlockSize]; + + @Override + public int advance(int target) throws IOException { + return disi.advance(target); + } + + @Override + public boolean advanceExact(int target) throws IOException { + return disi.advanceExact(target); + } + + @Override + public int nextDoc() throws IOException { + return disi.nextDoc(); + } + + @Override + public int docID() { + return disi.docID(); + } + + @Override + public long cost() { + return disi.cost(); + } + + @Override + public long longValue() throws IOException { + final int index = disi.index(); + final int blockIndex = index >>> numericBlockShift; + final int blockInIndex = index & numericBlockMask; + if (blockIndex != currentBlockIndex) { + assert blockIndex > currentBlockIndex; + if (blockIndex - 1 > 
currentBlockIndex) { + valuesData.seek(indexReader.get(blockIndex)); + } + currentBlockIndex = blockIndex; + decoder.decode(valuesData, currentBlock); + } + return currentBlock[blockInIndex]; + } + }; + } + } + + private NumericValues getValues(NumericEntry entry) throws IOException { + assert entry.numValues > 0; + final RandomAccessInput indexSlice = data.randomAccessSlice(entry.indexOffset, entry.indexLength); + final DirectMonotonicReader indexReader = DirectMonotonicReader.getInstance(entry.indexMeta, indexSlice); + + final IndexInput valuesData = data.slice("values", entry.valuesOffset, entry.valuesLength); + return new NumericValues() { + + private final ES87TSDBDocValuesEncoder decoder = new ES87TSDBDocValuesEncoder(); + private long currentBlockIndex = -1; + private final long[] currentBlock = new long[numericBlockSize]; + + @Override + long advance(long index) throws IOException { + final long blockIndex = index >>> numericBlockShift; + final int blockInIndex = (int) (index & numericBlockMask); + if (blockIndex != currentBlockIndex) { + assert blockIndex > currentBlockIndex; + if (blockIndex - 1 > currentBlockIndex) { + valuesData.seek(indexReader.get(blockIndex)); + } + currentBlockIndex = blockIndex; + decoder.decode(valuesData, currentBlock); + } + return currentBlock[blockInIndex]; + } + }; + } + + private SortedNumericDocValues getSortedNumeric(SortedNumericEntry entry) throws IOException { + if (entry.numValues == entry.numDocsWithField) { + return DocValues.singleton(getNumeric(entry)); + } + + final RandomAccessInput addressesInput = data.randomAccessSlice(entry.addressesOffset, entry.addressesLength); + final LongValues addresses = DirectMonotonicReader.getInstance(entry.addressesMeta, addressesInput); + + final NumericValues values = getValues(entry); + + if (entry.docsWithFieldOffset == -1) { + // dense + return new SortedNumericDocValues() { + + int doc = -1; + long start, end; + int count; + + @Override + public int nextDoc() throws IOException { + return advance(doc + 1); + } + + @Override + public int docID() { + return doc; + } + + @Override + public long cost() { + return maxDoc; + } + + @Override + public int advance(int target) throws IOException { + if (target >= maxDoc) { + return doc = NO_MORE_DOCS; + } + start = addresses.get(target); + end = addresses.get(target + 1L); + count = (int) (end - start); + return doc = target; + } + + @Override + public boolean advanceExact(int target) throws IOException { + start = addresses.get(target); + end = addresses.get(target + 1L); + count = (int) (end - start); + doc = target; + return true; + } + + @Override + public long nextValue() throws IOException { + return values.advance(start++); + } + + @Override + public int docValueCount() { + return count; + } + }; + } else { + // sparse + final IndexedDISI disi = new IndexedDISI( + data, + entry.docsWithFieldOffset, + entry.docsWithFieldLength, + entry.jumpTableEntryCount, + entry.denseRankPower, + entry.numDocsWithField + ); + return new SortedNumericDocValues() { + + boolean set; + long start, end; + int count; + + @Override + public int nextDoc() throws IOException { + set = false; + return disi.nextDoc(); + } + + @Override + public int docID() { + return disi.docID(); + } + + @Override + public long cost() { + return disi.cost(); + } + + @Override + public int advance(int target) throws IOException { + set = false; + return disi.advance(target); + } + + @Override + public boolean advanceExact(int target) throws IOException { + set = false; + return 
disi.advanceExact(target); + } + + @Override + public long nextValue() throws IOException { + set(); + return values.advance(start++); + } + + @Override + public int docValueCount() { + set(); + return count; + } + + private void set() { + if (set == false) { + final int index = disi.index(); + start = addresses.get(index); + end = addresses.get(index + 1L); + count = (int) (end - start); + set = true; + } + } + }; + } + } + + private static class NumericEntry { + long docsWithFieldOffset; + long docsWithFieldLength; + short jumpTableEntryCount; + byte denseRankPower; + long numValues; + long indexOffset; + long indexLength; + DirectMonotonicReader.Meta indexMeta; + long valuesOffset; + long valuesLength; + } + + private static class SortedNumericEntry extends NumericEntry { + int numDocsWithField; + DirectMonotonicReader.Meta addressesMeta; + long addressesOffset; + long addressesLength; + } + +} diff --git a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ForUtil.java b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ForUtil.java new file mode 100644 index 000000000000..2f20e0c8cee2 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ForUtil.java @@ -0,0 +1,1047 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.codec.tsdb; + +import org.apache.lucene.store.DataInput; +import org.apache.lucene.store.DataOutput; + +import java.io.IOException; + +// Inspired from https://fulmicoton.com/posts/bitpacking/ +// Encodes multiple integers in a long to get SIMD-like speedups. 
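+// The packing width is chosen from the number of bits required per value: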
+// If bitsPerValue <= 8 then we pack 8 ints per long +// else if bitsPerValue <= 16 we pack 4 ints per long +// else we pack 2 ints per long +public final class ForUtil { + + static final int DEFAULT_BLOCK_SIZE = 128; + private final int blockSize; + private final int blockSizeLog2; + private final long[] tmp; + + public ForUtil() { + this(DEFAULT_BLOCK_SIZE); + } + + public ForUtil(int blockSize) { + this.blockSize = blockSize; + this.blockSizeLog2 = (int) (Math.log(blockSize) / Math.log(2)); + this.tmp = new long[blockSize / 2]; + } + + private static long expandMask32(long mask32) { + return mask32 | (mask32 << 32); + } + + private static long expandMask16(long mask16) { + return expandMask32(mask16 | (mask16 << 16)); + } + + private static long expandMask8(long mask8) { + return expandMask16(mask8 | (mask8 << 8)); + } + + private static long mask32(int bitsPerValue) { + return expandMask32((1L << bitsPerValue) - 1); + } + + private static long mask16(int bitsPerValue) { + return expandMask16((1L << bitsPerValue) - 1); + } + + private static long mask8(int bitsPerValue) { + return expandMask8((1L << bitsPerValue) - 1); + } + + private static void expand8(long[] arr) { + for (int i = 0; i < 16; ++i) { + long l = arr[i]; + arr[i] = (l >>> 56) & 0xFFL; + arr[16 + i] = (l >>> 48) & 0xFFL; + arr[32 + i] = (l >>> 40) & 0xFFL; + arr[48 + i] = (l >>> 32) & 0xFFL; + arr[64 + i] = (l >>> 24) & 0xFFL; + arr[80 + i] = (l >>> 16) & 0xFFL; + arr[96 + i] = (l >>> 8) & 0xFFL; + arr[112 + i] = l & 0xFFL; + } + } + + private static void expand8To32(long[] arr) { + for (int i = 0; i < 16; ++i) { + long l = arr[i]; + arr[i] = (l >>> 24) & 0x000000FF000000FFL; + arr[16 + i] = (l >>> 16) & 0x000000FF000000FFL; + arr[32 + i] = (l >>> 8) & 0x000000FF000000FFL; + arr[48 + i] = l & 0x000000FF000000FFL; + } + } + + private static void collapse8(long[] arr) { + for (int i = 0; i < 16; ++i) { + arr[i] = (arr[i] << 56) | (arr[16 + i] << 48) | (arr[32 + i] << 40) | (arr[48 + i] << 32) | (arr[64 + i] << 24) | (arr[80 + i] + << 16) | (arr[96 + i] << 8) | arr[112 + i]; + } + } + + private static void expand16(long[] arr) { + for (int i = 0; i < 32; ++i) { + long l = arr[i]; + arr[i] = (l >>> 48) & 0xFFFFL; + arr[32 + i] = (l >>> 32) & 0xFFFFL; + arr[64 + i] = (l >>> 16) & 0xFFFFL; + arr[96 + i] = l & 0xFFFFL; + } + } + + private static void expand16To32(long[] arr) { + for (int i = 0; i < 32; ++i) { + long l = arr[i]; + arr[i] = (l >>> 16) & 0x0000FFFF0000FFFFL; + arr[32 + i] = l & 0x0000FFFF0000FFFFL; + } + } + + private static void collapse16(long[] arr) { + for (int i = 0; i < 32; ++i) { + arr[i] = (arr[i] << 48) | (arr[32 + i] << 32) | (arr[64 + i] << 16) | arr[96 + i]; + } + } + + private static void expand32(long[] arr) { + for (int i = 0; i < 64; ++i) { + long l = arr[i]; + arr[i] = l >>> 32; + arr[64 + i] = l & 0xFFFFFFFFL; + } + } + + private static void collapse32(long[] arr) { + for (int i = 0; i < 64; ++i) { + arr[i] = (arr[i] << 32) | arr[64 + i]; + } + } + + /** Encode 128 integers from {@code longs} into {@code out}. 
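+ * Values are first collapsed into 8, 16 or 32 bit lanes depending on {@code bitsPerValue} and
+ * then bit-packed, so the encoded block occupies exactly {@code numBytes(bitsPerValue)} bytes.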
*/ + void encode(long[] longs, int bitsPerValue, DataOutput out) throws IOException { + final int nextPrimitive; + final int numLongs; + if (bitsPerValue <= 8) { + nextPrimitive = 8; + numLongs = blockSize / 8; + collapse8(longs); + } else if (bitsPerValue <= 16) { + nextPrimitive = 16; + numLongs = blockSize / 4; + collapse16(longs); + } else { + nextPrimitive = 32; + numLongs = blockSize / 2; + collapse32(longs); + } + + final int numLongsPerShift = bitsPerValue * 2; + int idx = 0; + int shift = nextPrimitive - bitsPerValue; + for (int i = 0; i < numLongsPerShift; ++i) { + tmp[i] = longs[idx++] << shift; + } + for (shift = shift - bitsPerValue; shift >= 0; shift -= bitsPerValue) { + for (int i = 0; i < numLongsPerShift; ++i) { + tmp[i] |= longs[idx++] << shift; + } + } + + final int remainingBitsPerLong = shift + bitsPerValue; + final long maskRemainingBitsPerLong; + if (nextPrimitive == 8) { + maskRemainingBitsPerLong = MASKS8[remainingBitsPerLong]; + } else if (nextPrimitive == 16) { + maskRemainingBitsPerLong = MASKS16[remainingBitsPerLong]; + } else { + maskRemainingBitsPerLong = MASKS32[remainingBitsPerLong]; + } + + int tmpIdx = 0; + int remainingBitsPerValue = bitsPerValue; + while (idx < numLongs) { + if (remainingBitsPerValue >= remainingBitsPerLong) { + remainingBitsPerValue -= remainingBitsPerLong; + tmp[tmpIdx++] |= (longs[idx] >>> remainingBitsPerValue) & maskRemainingBitsPerLong; + if (remainingBitsPerValue == 0) { + idx++; + remainingBitsPerValue = bitsPerValue; + } + } else { + final long mask1, mask2; + if (nextPrimitive == 8) { + mask1 = MASKS8[remainingBitsPerValue]; + mask2 = MASKS8[remainingBitsPerLong - remainingBitsPerValue]; + } else if (nextPrimitive == 16) { + mask1 = MASKS16[remainingBitsPerValue]; + mask2 = MASKS16[remainingBitsPerLong - remainingBitsPerValue]; + } else { + mask1 = MASKS32[remainingBitsPerValue]; + mask2 = MASKS32[remainingBitsPerLong - remainingBitsPerValue]; + } + tmp[tmpIdx] |= (longs[idx++] & mask1) << (remainingBitsPerLong - remainingBitsPerValue); + remainingBitsPerValue = bitsPerValue - remainingBitsPerLong + remainingBitsPerValue; + tmp[tmpIdx++] |= (longs[idx] >>> remainingBitsPerValue) & mask2; + } + } + + for (int i = 0; i < numLongsPerShift; ++i) { + out.writeLong(tmp[i]); + } + } + + /** Number of bytes required to encode 128 integers of {@code bitsPerValue} bits per value. 
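+ * For the default 128-value block this is {@code 16 * bitsPerValue}; for example, 5 bits per
+ * value encode to 80 bytes.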
*/ + int numBytes(int bitsPerValue) { + return bitsPerValue << (blockSizeLog2 - 3); + } + + private static void decodeSlow(int blockSize, int bitsPerValue, DataInput in, long[] tmp, long[] longs) throws IOException { + final int numLongs = bitsPerValue << 1; + in.readLongs(tmp, 0, numLongs); + final long mask = MASKS32[bitsPerValue]; + int longsIdx = 0; + int shift = 32 - bitsPerValue; + for (; shift >= 0; shift -= bitsPerValue) { + shiftLongs(tmp, numLongs, longs, longsIdx, shift, mask); + longsIdx += numLongs; + } + final int remainingBitsPerLong = shift + bitsPerValue; + final long mask32RemainingBitsPerLong = MASKS32[remainingBitsPerLong]; + int tmpIdx = 0; + int remainingBits = remainingBitsPerLong; + for (; longsIdx < blockSize / 2; ++longsIdx) { + int b = bitsPerValue - remainingBits; + long l = (tmp[tmpIdx++] & MASKS32[remainingBits]) << b; + while (b >= remainingBitsPerLong) { + b -= remainingBitsPerLong; + l |= (tmp[tmpIdx++] & mask32RemainingBitsPerLong) << b; + } + if (b > 0) { + l |= (tmp[tmpIdx] >>> (remainingBitsPerLong - b)) & MASKS32[b]; + remainingBits = remainingBitsPerLong - b; + } else { + remainingBits = remainingBitsPerLong; + } + longs[longsIdx] = l; + } + } + + /** + * The pattern that this shiftLongs method applies is recognized by the C2 compiler, which + * generates SIMD instructions for it in order to shift multiple longs at once. + */ + private static void shiftLongs(long[] a, int count, long[] b, int bi, int shift, long mask) { + for (int i = 0; i < count; ++i) { + b[bi + i] = (a[i] >>> shift) & mask; + } + } + + private static final long[] MASKS8 = new long[8]; + private static final long[] MASKS16 = new long[16]; + private static final long[] MASKS32 = new long[32]; + + static { + for (int i = 0; i < 8; ++i) { + MASKS8[i] = mask8(i); + } + for (int i = 0; i < 16; ++i) { + MASKS16[i] = mask16(i); + } + for (int i = 0; i < 32; ++i) { + MASKS32[i] = mask32(i); + } + } + // mark values in array as final longs to avoid the cost of reading array, arrays should only be + // used when the idx is a variable + private static final long MASK8_1 = MASKS8[1]; + private static final long MASK8_2 = MASKS8[2]; + private static final long MASK8_3 = MASKS8[3]; + private static final long MASK8_4 = MASKS8[4]; + private static final long MASK8_5 = MASKS8[5]; + private static final long MASK8_6 = MASKS8[6]; + private static final long MASK8_7 = MASKS8[7]; + private static final long MASK16_1 = MASKS16[1]; + private static final long MASK16_2 = MASKS16[2]; + private static final long MASK16_3 = MASKS16[3]; + private static final long MASK16_4 = MASKS16[4]; + private static final long MASK16_5 = MASKS16[5]; + private static final long MASK16_6 = MASKS16[6]; + private static final long MASK16_7 = MASKS16[7]; + private static final long MASK16_9 = MASKS16[9]; + private static final long MASK16_10 = MASKS16[10]; + private static final long MASK16_11 = MASKS16[11]; + private static final long MASK16_12 = MASKS16[12]; + private static final long MASK16_13 = MASKS16[13]; + private static final long MASK16_14 = MASKS16[14]; + private static final long MASK16_15 = MASKS16[15]; + private static final long MASK32_1 = MASKS32[1]; + private static final long MASK32_2 = MASKS32[2]; + private static final long MASK32_3 = MASKS32[3]; + private static final long MASK32_4 = MASKS32[4]; + private static final long MASK32_5 = MASKS32[5]; + private static final long MASK32_6 = MASKS32[6]; + private static final long MASK32_7 = MASKS32[7]; + private static final long MASK32_8 = MASKS32[8]; + private 
static final long MASK32_9 = MASKS32[9]; + private static final long MASK32_10 = MASKS32[10]; + private static final long MASK32_11 = MASKS32[11]; + private static final long MASK32_12 = MASKS32[12]; + private static final long MASK32_13 = MASKS32[13]; + private static final long MASK32_14 = MASKS32[14]; + private static final long MASK32_15 = MASKS32[15]; + private static final long MASK32_17 = MASKS32[17]; + private static final long MASK32_18 = MASKS32[18]; + private static final long MASK32_19 = MASKS32[19]; + private static final long MASK32_20 = MASKS32[20]; + private static final long MASK32_21 = MASKS32[21]; + private static final long MASK32_22 = MASKS32[22]; + private static final long MASK32_23 = MASKS32[23]; + private static final long MASK32_24 = MASKS32[24]; + + /** Decode 128 integers into {@code longs}. */ + void decode(int bitsPerValue, DataInput in, long[] longs) throws IOException { + switch (bitsPerValue) { + case 1: + decode1(in, tmp, longs); + expand8(longs); + break; + case 2: + decode2(in, tmp, longs); + expand8(longs); + break; + case 3: + decode3(in, tmp, longs); + expand8(longs); + break; + case 4: + decode4(in, tmp, longs); + expand8(longs); + break; + case 5: + decode5(in, tmp, longs); + expand8(longs); + break; + case 6: + decode6(in, tmp, longs); + expand8(longs); + break; + case 7: + decode7(in, tmp, longs); + expand8(longs); + break; + case 8: + decode8(in, tmp, longs); + expand8(longs); + break; + case 9: + decode9(in, tmp, longs); + expand16(longs); + break; + case 10: + decode10(in, tmp, longs); + expand16(longs); + break; + case 11: + decode11(in, tmp, longs); + expand16(longs); + break; + case 12: + decode12(in, tmp, longs); + expand16(longs); + break; + case 13: + decode13(in, tmp, longs); + expand16(longs); + break; + case 14: + decode14(in, tmp, longs); + expand16(longs); + break; + case 15: + decode15(in, tmp, longs); + expand16(longs); + break; + case 16: + decode16(in, tmp, longs); + expand16(longs); + break; + case 17: + decode17(in, tmp, longs); + expand32(longs); + break; + case 18: + decode18(in, tmp, longs); + expand32(longs); + break; + case 19: + decode19(in, tmp, longs); + expand32(longs); + break; + case 20: + decode20(in, tmp, longs); + expand32(longs); + break; + case 21: + decode21(in, tmp, longs); + expand32(longs); + break; + case 22: + decode22(in, tmp, longs); + expand32(longs); + break; + case 23: + decode23(in, tmp, longs); + expand32(longs); + break; + case 24: + decode24(in, tmp, longs); + expand32(longs); + break; + default: + decodeSlow(blockSize, bitsPerValue, in, tmp, longs); + expand32(longs); + break; + } + } + + /** + * Decodes 128 integers into 64 {@code longs} such that each long contains two values, each + * represented with 32 bits. Values [0..63] are encoded in the high-order bits of {@code longs} + * [0..63], and values [64..127] are encoded in the low-order bits of {@code longs} [0..63]. This + * representation may allow subsequent operations to be performed on two values at a time. 
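+ * For bit widths above 16 the values are decoded straight into this layout, so no additional
+ * expansion step is needed.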
+ */ + void decodeTo32(int bitsPerValue, DataInput in, long[] longs) throws IOException { + switch (bitsPerValue) { + case 1: + decode1(in, tmp, longs); + expand8To32(longs); + break; + case 2: + decode2(in, tmp, longs); + expand8To32(longs); + break; + case 3: + decode3(in, tmp, longs); + expand8To32(longs); + break; + case 4: + decode4(in, tmp, longs); + expand8To32(longs); + break; + case 5: + decode5(in, tmp, longs); + expand8To32(longs); + break; + case 6: + decode6(in, tmp, longs); + expand8To32(longs); + break; + case 7: + decode7(in, tmp, longs); + expand8To32(longs); + break; + case 8: + decode8(in, tmp, longs); + expand8To32(longs); + break; + case 9: + decode9(in, tmp, longs); + expand16To32(longs); + break; + case 10: + decode10(in, tmp, longs); + expand16To32(longs); + break; + case 11: + decode11(in, tmp, longs); + expand16To32(longs); + break; + case 12: + decode12(in, tmp, longs); + expand16To32(longs); + break; + case 13: + decode13(in, tmp, longs); + expand16To32(longs); + break; + case 14: + decode14(in, tmp, longs); + expand16To32(longs); + break; + case 15: + decode15(in, tmp, longs); + expand16To32(longs); + break; + case 16: + decode16(in, tmp, longs); + expand16To32(longs); + break; + case 17: + decode17(in, tmp, longs); + break; + case 18: + decode18(in, tmp, longs); + break; + case 19: + decode19(in, tmp, longs); + break; + case 20: + decode20(in, tmp, longs); + break; + case 21: + decode21(in, tmp, longs); + break; + case 22: + decode22(in, tmp, longs); + break; + case 23: + decode23(in, tmp, longs); + break; + case 24: + decode24(in, tmp, longs); + break; + default: + decodeSlow(blockSize, bitsPerValue, in, tmp, longs); + break; + } + } + + private static void decode1(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 2); + shiftLongs(tmp, 2, longs, 0, 7, MASK8_1); + shiftLongs(tmp, 2, longs, 2, 6, MASK8_1); + shiftLongs(tmp, 2, longs, 4, 5, MASK8_1); + shiftLongs(tmp, 2, longs, 6, 4, MASK8_1); + shiftLongs(tmp, 2, longs, 8, 3, MASK8_1); + shiftLongs(tmp, 2, longs, 10, 2, MASK8_1); + shiftLongs(tmp, 2, longs, 12, 1, MASK8_1); + shiftLongs(tmp, 2, longs, 14, 0, MASK8_1); + } + + private static void decode2(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 4); + shiftLongs(tmp, 4, longs, 0, 6, MASK8_2); + shiftLongs(tmp, 4, longs, 4, 4, MASK8_2); + shiftLongs(tmp, 4, longs, 8, 2, MASK8_2); + shiftLongs(tmp, 4, longs, 12, 0, MASK8_2); + } + + private static void decode3(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 6); + shiftLongs(tmp, 6, longs, 0, 5, MASK8_3); + shiftLongs(tmp, 6, longs, 6, 2, MASK8_3); + for (int iter = 0, tmpIdx = 0, longsIdx = 12; iter < 2; ++iter, tmpIdx += 3, longsIdx += 2) { + long l0 = (tmp[tmpIdx + 0] & MASK8_2) << 1; + l0 |= (tmp[tmpIdx + 1] >>> 1) & MASK8_1; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK8_1) << 2; + l1 |= (tmp[tmpIdx + 2] & MASK8_2) << 0; + longs[longsIdx + 1] = l1; + } + } + + private static void decode4(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 8); + shiftLongs(tmp, 8, longs, 0, 4, MASK8_4); + shiftLongs(tmp, 8, longs, 8, 0, MASK8_4); + } + + private static void decode5(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 10); + shiftLongs(tmp, 10, longs, 0, 3, MASK8_5); + for (int iter = 0, tmpIdx = 0, longsIdx = 10; iter < 2; ++iter, tmpIdx += 5, longsIdx += 3) { + long l0 = (tmp[tmpIdx + 0] & MASK8_3) << 2; + l0 |= 
(tmp[tmpIdx + 1] >>> 1) & MASK8_2; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK8_1) << 4; + l1 |= (tmp[tmpIdx + 2] & MASK8_3) << 1; + l1 |= (tmp[tmpIdx + 3] >>> 2) & MASK8_1; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 3] & MASK8_2) << 3; + l2 |= (tmp[tmpIdx + 4] & MASK8_3) << 0; + longs[longsIdx + 2] = l2; + } + } + + private static void decode6(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 12); + shiftLongs(tmp, 12, longs, 0, 2, MASK8_6); + shiftLongs(tmp, 12, tmp, 0, 0, MASK8_2); + for (int iter = 0, tmpIdx = 0, longsIdx = 12; iter < 4; ++iter, tmpIdx += 3, longsIdx += 1) { + long l0 = tmp[tmpIdx + 0] << 4; + l0 |= tmp[tmpIdx + 1] << 2; + l0 |= tmp[tmpIdx + 2] << 0; + longs[longsIdx + 0] = l0; + } + } + + private static void decode7(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 14); + shiftLongs(tmp, 14, longs, 0, 1, MASK8_7); + shiftLongs(tmp, 14, tmp, 0, 0, MASK8_1); + for (int iter = 0, tmpIdx = 0, longsIdx = 14; iter < 2; ++iter, tmpIdx += 7, longsIdx += 1) { + long l0 = tmp[tmpIdx + 0] << 6; + l0 |= tmp[tmpIdx + 1] << 5; + l0 |= tmp[tmpIdx + 2] << 4; + l0 |= tmp[tmpIdx + 3] << 3; + l0 |= tmp[tmpIdx + 4] << 2; + l0 |= tmp[tmpIdx + 5] << 1; + l0 |= tmp[tmpIdx + 6] << 0; + longs[longsIdx + 0] = l0; + } + } + + private static void decode8(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(longs, 0, 16); + } + + private static void decode9(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 18); + shiftLongs(tmp, 18, longs, 0, 7, MASK16_9); + for (int iter = 0, tmpIdx = 0, longsIdx = 18; iter < 2; ++iter, tmpIdx += 9, longsIdx += 7) { + long l0 = (tmp[tmpIdx + 0] & MASK16_7) << 2; + l0 |= (tmp[tmpIdx + 1] >>> 5) & MASK16_2; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK16_5) << 4; + l1 |= (tmp[tmpIdx + 2] >>> 3) & MASK16_4; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 2] & MASK16_3) << 6; + l2 |= (tmp[tmpIdx + 3] >>> 1) & MASK16_6; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 3] & MASK16_1) << 8; + l3 |= (tmp[tmpIdx + 4] & MASK16_7) << 1; + l3 |= (tmp[tmpIdx + 5] >>> 6) & MASK16_1; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 5] & MASK16_6) << 3; + l4 |= (tmp[tmpIdx + 6] >>> 4) & MASK16_3; + longs[longsIdx + 4] = l4; + long l5 = (tmp[tmpIdx + 6] & MASK16_4) << 5; + l5 |= (tmp[tmpIdx + 7] >>> 2) & MASK16_5; + longs[longsIdx + 5] = l5; + long l6 = (tmp[tmpIdx + 7] & MASK16_2) << 7; + l6 |= (tmp[tmpIdx + 8] & MASK16_7) << 0; + longs[longsIdx + 6] = l6; + } + } + + private static void decode10(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 20); + shiftLongs(tmp, 20, longs, 0, 6, MASK16_10); + for (int iter = 0, tmpIdx = 0, longsIdx = 20; iter < 4; ++iter, tmpIdx += 5, longsIdx += 3) { + long l0 = (tmp[tmpIdx + 0] & MASK16_6) << 4; + l0 |= (tmp[tmpIdx + 1] >>> 2) & MASK16_4; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK16_2) << 8; + l1 |= (tmp[tmpIdx + 2] & MASK16_6) << 2; + l1 |= (tmp[tmpIdx + 3] >>> 4) & MASK16_2; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 3] & MASK16_4) << 6; + l2 |= (tmp[tmpIdx + 4] & MASK16_6) << 0; + longs[longsIdx + 2] = l2; + } + } + + private static void decode11(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 22); + shiftLongs(tmp, 22, longs, 0, 5, MASK16_11); + for (int iter = 0, tmpIdx = 0, longsIdx = 22; iter < 2; ++iter, tmpIdx += 11, longsIdx += 5) { + long 
l0 = (tmp[tmpIdx + 0] & MASK16_5) << 6; + l0 |= (tmp[tmpIdx + 1] & MASK16_5) << 1; + l0 |= (tmp[tmpIdx + 2] >>> 4) & MASK16_1; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 2] & MASK16_4) << 7; + l1 |= (tmp[tmpIdx + 3] & MASK16_5) << 2; + l1 |= (tmp[tmpIdx + 4] >>> 3) & MASK16_2; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 4] & MASK16_3) << 8; + l2 |= (tmp[tmpIdx + 5] & MASK16_5) << 3; + l2 |= (tmp[tmpIdx + 6] >>> 2) & MASK16_3; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 6] & MASK16_2) << 9; + l3 |= (tmp[tmpIdx + 7] & MASK16_5) << 4; + l3 |= (tmp[tmpIdx + 8] >>> 1) & MASK16_4; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 8] & MASK16_1) << 10; + l4 |= (tmp[tmpIdx + 9] & MASK16_5) << 5; + l4 |= (tmp[tmpIdx + 10] & MASK16_5) << 0; + longs[longsIdx + 4] = l4; + } + } + + private static void decode12(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 24); + shiftLongs(tmp, 24, longs, 0, 4, MASK16_12); + shiftLongs(tmp, 24, tmp, 0, 0, MASK16_4); + for (int iter = 0, tmpIdx = 0, longsIdx = 24; iter < 8; ++iter, tmpIdx += 3, longsIdx += 1) { + long l0 = tmp[tmpIdx + 0] << 8; + l0 |= tmp[tmpIdx + 1] << 4; + l0 |= tmp[tmpIdx + 2] << 0; + longs[longsIdx + 0] = l0; + } + } + + private static void decode13(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 26); + shiftLongs(tmp, 26, longs, 0, 3, MASK16_13); + for (int iter = 0, tmpIdx = 0, longsIdx = 26; iter < 2; ++iter, tmpIdx += 13, longsIdx += 3) { + long l0 = (tmp[tmpIdx + 0] & MASK16_3) << 10; + l0 |= (tmp[tmpIdx + 1] & MASK16_3) << 7; + l0 |= (tmp[tmpIdx + 2] & MASK16_3) << 4; + l0 |= (tmp[tmpIdx + 3] & MASK16_3) << 1; + l0 |= (tmp[tmpIdx + 4] >>> 2) & MASK16_1; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 4] & MASK16_2) << 11; + l1 |= (tmp[tmpIdx + 5] & MASK16_3) << 8; + l1 |= (tmp[tmpIdx + 6] & MASK16_3) << 5; + l1 |= (tmp[tmpIdx + 7] & MASK16_3) << 2; + l1 |= (tmp[tmpIdx + 8] >>> 1) & MASK16_2; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 8] & MASK16_1) << 12; + l2 |= (tmp[tmpIdx + 9] & MASK16_3) << 9; + l2 |= (tmp[tmpIdx + 10] & MASK16_3) << 6; + l2 |= (tmp[tmpIdx + 11] & MASK16_3) << 3; + l2 |= (tmp[tmpIdx + 12] & MASK16_3) << 0; + longs[longsIdx + 2] = l2; + } + } + + private static void decode14(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 28); + shiftLongs(tmp, 28, longs, 0, 2, MASK16_14); + shiftLongs(tmp, 28, tmp, 0, 0, MASK16_2); + for (int iter = 0, tmpIdx = 0, longsIdx = 28; iter < 4; ++iter, tmpIdx += 7, longsIdx += 1) { + long l0 = tmp[tmpIdx + 0] << 12; + l0 |= tmp[tmpIdx + 1] << 10; + l0 |= tmp[tmpIdx + 2] << 8; + l0 |= tmp[tmpIdx + 3] << 6; + l0 |= tmp[tmpIdx + 4] << 4; + l0 |= tmp[tmpIdx + 5] << 2; + l0 |= tmp[tmpIdx + 6] << 0; + longs[longsIdx + 0] = l0; + } + } + + private static void decode15(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 30); + shiftLongs(tmp, 30, longs, 0, 1, MASK16_15); + shiftLongs(tmp, 30, tmp, 0, 0, MASK16_1); + for (int iter = 0, tmpIdx = 0, longsIdx = 30; iter < 2; ++iter, tmpIdx += 15, longsIdx += 1) { + long l0 = tmp[tmpIdx + 0] << 14; + l0 |= tmp[tmpIdx + 1] << 13; + l0 |= tmp[tmpIdx + 2] << 12; + l0 |= tmp[tmpIdx + 3] << 11; + l0 |= tmp[tmpIdx + 4] << 10; + l0 |= tmp[tmpIdx + 5] << 9; + l0 |= tmp[tmpIdx + 6] << 8; + l0 |= tmp[tmpIdx + 7] << 7; + l0 |= tmp[tmpIdx + 8] << 6; + l0 |= tmp[tmpIdx + 9] << 5; + l0 |= tmp[tmpIdx + 10] << 4; + l0 |= tmp[tmpIdx + 11] << 3; + l0 |= tmp[tmpIdx + 12] << 2; + l0 |= 
tmp[tmpIdx + 13] << 1; + l0 |= tmp[tmpIdx + 14] << 0; + longs[longsIdx + 0] = l0; + } + } + + private static void decode16(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(longs, 0, 32); + } + + private static void decode17(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 34); + shiftLongs(tmp, 34, longs, 0, 15, MASK32_17); + for (int iter = 0, tmpIdx = 0, longsIdx = 34; iter < 2; ++iter, tmpIdx += 17, longsIdx += 15) { + long l0 = (tmp[tmpIdx + 0] & MASK32_15) << 2; + l0 |= (tmp[tmpIdx + 1] >>> 13) & MASK32_2; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK32_13) << 4; + l1 |= (tmp[tmpIdx + 2] >>> 11) & MASK32_4; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 2] & MASK32_11) << 6; + l2 |= (tmp[tmpIdx + 3] >>> 9) & MASK32_6; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 3] & MASK32_9) << 8; + l3 |= (tmp[tmpIdx + 4] >>> 7) & MASK32_8; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 4] & MASK32_7) << 10; + l4 |= (tmp[tmpIdx + 5] >>> 5) & MASK32_10; + longs[longsIdx + 4] = l4; + long l5 = (tmp[tmpIdx + 5] & MASK32_5) << 12; + l5 |= (tmp[tmpIdx + 6] >>> 3) & MASK32_12; + longs[longsIdx + 5] = l5; + long l6 = (tmp[tmpIdx + 6] & MASK32_3) << 14; + l6 |= (tmp[tmpIdx + 7] >>> 1) & MASK32_14; + longs[longsIdx + 6] = l6; + long l7 = (tmp[tmpIdx + 7] & MASK32_1) << 16; + l7 |= (tmp[tmpIdx + 8] & MASK32_15) << 1; + l7 |= (tmp[tmpIdx + 9] >>> 14) & MASK32_1; + longs[longsIdx + 7] = l7; + long l8 = (tmp[tmpIdx + 9] & MASK32_14) << 3; + l8 |= (tmp[tmpIdx + 10] >>> 12) & MASK32_3; + longs[longsIdx + 8] = l8; + long l9 = (tmp[tmpIdx + 10] & MASK32_12) << 5; + l9 |= (tmp[tmpIdx + 11] >>> 10) & MASK32_5; + longs[longsIdx + 9] = l9; + long l10 = (tmp[tmpIdx + 11] & MASK32_10) << 7; + l10 |= (tmp[tmpIdx + 12] >>> 8) & MASK32_7; + longs[longsIdx + 10] = l10; + long l11 = (tmp[tmpIdx + 12] & MASK32_8) << 9; + l11 |= (tmp[tmpIdx + 13] >>> 6) & MASK32_9; + longs[longsIdx + 11] = l11; + long l12 = (tmp[tmpIdx + 13] & MASK32_6) << 11; + l12 |= (tmp[tmpIdx + 14] >>> 4) & MASK32_11; + longs[longsIdx + 12] = l12; + long l13 = (tmp[tmpIdx + 14] & MASK32_4) << 13; + l13 |= (tmp[tmpIdx + 15] >>> 2) & MASK32_13; + longs[longsIdx + 13] = l13; + long l14 = (tmp[tmpIdx + 15] & MASK32_2) << 15; + l14 |= (tmp[tmpIdx + 16] & MASK32_15) << 0; + longs[longsIdx + 14] = l14; + } + } + + private static void decode18(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 36); + shiftLongs(tmp, 36, longs, 0, 14, MASK32_18); + for (int iter = 0, tmpIdx = 0, longsIdx = 36; iter < 4; ++iter, tmpIdx += 9, longsIdx += 7) { + long l0 = (tmp[tmpIdx + 0] & MASK32_14) << 4; + l0 |= (tmp[tmpIdx + 1] >>> 10) & MASK32_4; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK32_10) << 8; + l1 |= (tmp[tmpIdx + 2] >>> 6) & MASK32_8; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 2] & MASK32_6) << 12; + l2 |= (tmp[tmpIdx + 3] >>> 2) & MASK32_12; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 3] & MASK32_2) << 16; + l3 |= (tmp[tmpIdx + 4] & MASK32_14) << 2; + l3 |= (tmp[tmpIdx + 5] >>> 12) & MASK32_2; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 5] & MASK32_12) << 6; + l4 |= (tmp[tmpIdx + 6] >>> 8) & MASK32_6; + longs[longsIdx + 4] = l4; + long l5 = (tmp[tmpIdx + 6] & MASK32_8) << 10; + l5 |= (tmp[tmpIdx + 7] >>> 4) & MASK32_10; + longs[longsIdx + 5] = l5; + long l6 = (tmp[tmpIdx + 7] & MASK32_4) << 14; + l6 |= (tmp[tmpIdx + 8] & MASK32_14) << 0; + longs[longsIdx + 6] = l6; + } + } + + private 
static void decode19(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 38); + shiftLongs(tmp, 38, longs, 0, 13, MASK32_19); + for (int iter = 0, tmpIdx = 0, longsIdx = 38; iter < 2; ++iter, tmpIdx += 19, longsIdx += 13) { + long l0 = (tmp[tmpIdx + 0] & MASK32_13) << 6; + l0 |= (tmp[tmpIdx + 1] >>> 7) & MASK32_6; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK32_7) << 12; + l1 |= (tmp[tmpIdx + 2] >>> 1) & MASK32_12; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 2] & MASK32_1) << 18; + l2 |= (tmp[tmpIdx + 3] & MASK32_13) << 5; + l2 |= (tmp[tmpIdx + 4] >>> 8) & MASK32_5; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 4] & MASK32_8) << 11; + l3 |= (tmp[tmpIdx + 5] >>> 2) & MASK32_11; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 5] & MASK32_2) << 17; + l4 |= (tmp[tmpIdx + 6] & MASK32_13) << 4; + l4 |= (tmp[tmpIdx + 7] >>> 9) & MASK32_4; + longs[longsIdx + 4] = l4; + long l5 = (tmp[tmpIdx + 7] & MASK32_9) << 10; + l5 |= (tmp[tmpIdx + 8] >>> 3) & MASK32_10; + longs[longsIdx + 5] = l5; + long l6 = (tmp[tmpIdx + 8] & MASK32_3) << 16; + l6 |= (tmp[tmpIdx + 9] & MASK32_13) << 3; + l6 |= (tmp[tmpIdx + 10] >>> 10) & MASK32_3; + longs[longsIdx + 6] = l6; + long l7 = (tmp[tmpIdx + 10] & MASK32_10) << 9; + l7 |= (tmp[tmpIdx + 11] >>> 4) & MASK32_9; + longs[longsIdx + 7] = l7; + long l8 = (tmp[tmpIdx + 11] & MASK32_4) << 15; + l8 |= (tmp[tmpIdx + 12] & MASK32_13) << 2; + l8 |= (tmp[tmpIdx + 13] >>> 11) & MASK32_2; + longs[longsIdx + 8] = l8; + long l9 = (tmp[tmpIdx + 13] & MASK32_11) << 8; + l9 |= (tmp[tmpIdx + 14] >>> 5) & MASK32_8; + longs[longsIdx + 9] = l9; + long l10 = (tmp[tmpIdx + 14] & MASK32_5) << 14; + l10 |= (tmp[tmpIdx + 15] & MASK32_13) << 1; + l10 |= (tmp[tmpIdx + 16] >>> 12) & MASK32_1; + longs[longsIdx + 10] = l10; + long l11 = (tmp[tmpIdx + 16] & MASK32_12) << 7; + l11 |= (tmp[tmpIdx + 17] >>> 6) & MASK32_7; + longs[longsIdx + 11] = l11; + long l12 = (tmp[tmpIdx + 17] & MASK32_6) << 13; + l12 |= (tmp[tmpIdx + 18] & MASK32_13) << 0; + longs[longsIdx + 12] = l12; + } + } + + private static void decode20(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 40); + shiftLongs(tmp, 40, longs, 0, 12, MASK32_20); + for (int iter = 0, tmpIdx = 0, longsIdx = 40; iter < 8; ++iter, tmpIdx += 5, longsIdx += 3) { + long l0 = (tmp[tmpIdx + 0] & MASK32_12) << 8; + l0 |= (tmp[tmpIdx + 1] >>> 4) & MASK32_8; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK32_4) << 16; + l1 |= (tmp[tmpIdx + 2] & MASK32_12) << 4; + l1 |= (tmp[tmpIdx + 3] >>> 8) & MASK32_4; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 3] & MASK32_8) << 12; + l2 |= (tmp[tmpIdx + 4] & MASK32_12) << 0; + longs[longsIdx + 2] = l2; + } + } + + private static void decode21(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 42); + shiftLongs(tmp, 42, longs, 0, 11, MASK32_21); + for (int iter = 0, tmpIdx = 0, longsIdx = 42; iter < 2; ++iter, tmpIdx += 21, longsIdx += 11) { + long l0 = (tmp[tmpIdx + 0] & MASK32_11) << 10; + l0 |= (tmp[tmpIdx + 1] >>> 1) & MASK32_10; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK32_1) << 20; + l1 |= (tmp[tmpIdx + 2] & MASK32_11) << 9; + l1 |= (tmp[tmpIdx + 3] >>> 2) & MASK32_9; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 3] & MASK32_2) << 19; + l2 |= (tmp[tmpIdx + 4] & MASK32_11) << 8; + l2 |= (tmp[tmpIdx + 5] >>> 3) & MASK32_8; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 5] & MASK32_3) << 18; + l3 |= (tmp[tmpIdx + 6] & 
MASK32_11) << 7; + l3 |= (tmp[tmpIdx + 7] >>> 4) & MASK32_7; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 7] & MASK32_4) << 17; + l4 |= (tmp[tmpIdx + 8] & MASK32_11) << 6; + l4 |= (tmp[tmpIdx + 9] >>> 5) & MASK32_6; + longs[longsIdx + 4] = l4; + long l5 = (tmp[tmpIdx + 9] & MASK32_5) << 16; + l5 |= (tmp[tmpIdx + 10] & MASK32_11) << 5; + l5 |= (tmp[tmpIdx + 11] >>> 6) & MASK32_5; + longs[longsIdx + 5] = l5; + long l6 = (tmp[tmpIdx + 11] & MASK32_6) << 15; + l6 |= (tmp[tmpIdx + 12] & MASK32_11) << 4; + l6 |= (tmp[tmpIdx + 13] >>> 7) & MASK32_4; + longs[longsIdx + 6] = l6; + long l7 = (tmp[tmpIdx + 13] & MASK32_7) << 14; + l7 |= (tmp[tmpIdx + 14] & MASK32_11) << 3; + l7 |= (tmp[tmpIdx + 15] >>> 8) & MASK32_3; + longs[longsIdx + 7] = l7; + long l8 = (tmp[tmpIdx + 15] & MASK32_8) << 13; + l8 |= (tmp[tmpIdx + 16] & MASK32_11) << 2; + l8 |= (tmp[tmpIdx + 17] >>> 9) & MASK32_2; + longs[longsIdx + 8] = l8; + long l9 = (tmp[tmpIdx + 17] & MASK32_9) << 12; + l9 |= (tmp[tmpIdx + 18] & MASK32_11) << 1; + l9 |= (tmp[tmpIdx + 19] >>> 10) & MASK32_1; + longs[longsIdx + 9] = l9; + long l10 = (tmp[tmpIdx + 19] & MASK32_10) << 11; + l10 |= (tmp[tmpIdx + 20] & MASK32_11) << 0; + longs[longsIdx + 10] = l10; + } + } + + private static void decode22(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 44); + shiftLongs(tmp, 44, longs, 0, 10, MASK32_22); + for (int iter = 0, tmpIdx = 0, longsIdx = 44; iter < 4; ++iter, tmpIdx += 11, longsIdx += 5) { + long l0 = (tmp[tmpIdx + 0] & MASK32_10) << 12; + l0 |= (tmp[tmpIdx + 1] & MASK32_10) << 2; + l0 |= (tmp[tmpIdx + 2] >>> 8) & MASK32_2; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 2] & MASK32_8) << 14; + l1 |= (tmp[tmpIdx + 3] & MASK32_10) << 4; + l1 |= (tmp[tmpIdx + 4] >>> 6) & MASK32_4; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 4] & MASK32_6) << 16; + l2 |= (tmp[tmpIdx + 5] & MASK32_10) << 6; + l2 |= (tmp[tmpIdx + 6] >>> 4) & MASK32_6; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 6] & MASK32_4) << 18; + l3 |= (tmp[tmpIdx + 7] & MASK32_10) << 8; + l3 |= (tmp[tmpIdx + 8] >>> 2) & MASK32_8; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 8] & MASK32_2) << 20; + l4 |= (tmp[tmpIdx + 9] & MASK32_10) << 10; + l4 |= (tmp[tmpIdx + 10] & MASK32_10) << 0; + longs[longsIdx + 4] = l4; + } + } + + private static void decode23(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 46); + shiftLongs(tmp, 46, longs, 0, 9, MASK32_23); + for (int iter = 0, tmpIdx = 0, longsIdx = 46; iter < 2; ++iter, tmpIdx += 23, longsIdx += 9) { + long l0 = (tmp[tmpIdx + 0] & MASK32_9) << 14; + l0 |= (tmp[tmpIdx + 1] & MASK32_9) << 5; + l0 |= (tmp[tmpIdx + 2] >>> 4) & MASK32_5; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 2] & MASK32_4) << 19; + l1 |= (tmp[tmpIdx + 3] & MASK32_9) << 10; + l1 |= (tmp[tmpIdx + 4] & MASK32_9) << 1; + l1 |= (tmp[tmpIdx + 5] >>> 8) & MASK32_1; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 5] & MASK32_8) << 15; + l2 |= (tmp[tmpIdx + 6] & MASK32_9) << 6; + l2 |= (tmp[tmpIdx + 7] >>> 3) & MASK32_6; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 7] & MASK32_3) << 20; + l3 |= (tmp[tmpIdx + 8] & MASK32_9) << 11; + l3 |= (tmp[tmpIdx + 9] & MASK32_9) << 2; + l3 |= (tmp[tmpIdx + 10] >>> 7) & MASK32_2; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 10] & MASK32_7) << 16; + l4 |= (tmp[tmpIdx + 11] & MASK32_9) << 7; + l4 |= (tmp[tmpIdx + 12] >>> 2) & MASK32_7; + longs[longsIdx + 4] = l4; + long l5 = (tmp[tmpIdx + 12] & MASK32_2) << 21; + l5 |= 
(tmp[tmpIdx + 13] & MASK32_9) << 12; + l5 |= (tmp[tmpIdx + 14] & MASK32_9) << 3; + l5 |= (tmp[tmpIdx + 15] >>> 6) & MASK32_3; + longs[longsIdx + 5] = l5; + long l6 = (tmp[tmpIdx + 15] & MASK32_6) << 17; + l6 |= (tmp[tmpIdx + 16] & MASK32_9) << 8; + l6 |= (tmp[tmpIdx + 17] >>> 1) & MASK32_8; + longs[longsIdx + 6] = l6; + long l7 = (tmp[tmpIdx + 17] & MASK32_1) << 22; + l7 |= (tmp[tmpIdx + 18] & MASK32_9) << 13; + l7 |= (tmp[tmpIdx + 19] & MASK32_9) << 4; + l7 |= (tmp[tmpIdx + 20] >>> 5) & MASK32_4; + longs[longsIdx + 7] = l7; + long l8 = (tmp[tmpIdx + 20] & MASK32_5) << 18; + l8 |= (tmp[tmpIdx + 21] & MASK32_9) << 9; + l8 |= (tmp[tmpIdx + 22] & MASK32_9) << 0; + longs[longsIdx + 8] = l8; + } + } + + private static void decode24(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 48); + shiftLongs(tmp, 48, longs, 0, 8, MASK32_24); + shiftLongs(tmp, 48, tmp, 0, 0, MASK32_8); + for (int iter = 0, tmpIdx = 0, longsIdx = 48; iter < 16; ++iter, tmpIdx += 3, longsIdx += 1) { + long l0 = tmp[tmpIdx + 0] << 16; + l0 |= tmp[tmpIdx + 1] << 8; + l0 |= tmp[tmpIdx + 2] << 0; + longs[longsIdx + 0] = l0; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java index dfdbafe6eb2a..6f5716c88031 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java @@ -13,17 +13,22 @@ import org.apache.lucene.index.IndexDeletionPolicy; import org.apache.lucene.index.SegmentInfos; import org.elasticsearch.common.lucene.FilterIndexCommit; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogDeletionPolicy; import java.io.IOException; import java.nio.file.Path; +import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.function.LongSupplier; +import java.util.stream.Collectors; /** * An {@link IndexDeletionPolicy} that coordinates between Lucene's commits and the retention of translog generation files, @@ -38,6 +43,17 @@ public class CombinedDeletionPolicy extends IndexDeletionPolicy { private final SoftDeletesPolicy softDeletesPolicy; private final LongSupplier globalCheckpointSupplier; private final Map snapshottedCommits; // Number of snapshots held against each commit point. + + interface CommitsListener { + + void onNewAcquiredCommit(IndexCommit commit, Set additionalFiles); + + void onDeletedCommit(IndexCommit commit); + } + + @Nullable + private final CommitsListener commitsListener; + private volatile IndexCommit safeCommit; // the most recent safe commit point - its max_seqno at most the persisted global checkpoint. 
private volatile long maxSeqNoOfNextSafeCommit; private volatile IndexCommit lastCommit; // the most recent commit point @@ -47,12 +63,14 @@ public class CombinedDeletionPolicy extends IndexDeletionPolicy { Logger logger, TranslogDeletionPolicy translogDeletionPolicy, SoftDeletesPolicy softDeletesPolicy, - LongSupplier globalCheckpointSupplier + LongSupplier globalCheckpointSupplier, + @Nullable CommitsListener commitsListener ) { this.logger = logger; this.translogDeletionPolicy = translogDeletionPolicy; this.softDeletesPolicy = softDeletesPolicy; this.globalCheckpointSupplier = globalCheckpointSupplier; + this.commitsListener = commitsListener; this.snapshottedCommits = new HashMap<>(); } @@ -86,11 +104,15 @@ public void onCommit(List commits) throws IOException { logger.info("failed to get the total docs from the safe commit; use the total docs from the previous safe commit", ex); totalDocsOfSafeCommit = safeCommitInfo.docCount; } + IndexCommit newCommit = null; + IndexCommit previousLastCommit = null; + List deletedCommits = null; synchronized (this) { this.safeCommitInfo = new SafeCommitInfo( Long.parseLong(safeCommit.getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)), totalDocsOfSafeCommit ); + previousLastCommit = this.lastCommit; this.lastCommit = commits.get(commits.size() - 1); this.safeCommit = safeCommit; updateRetentionPolicy(); @@ -99,13 +121,32 @@ public void onCommit(List commits) throws IOException { } else { this.maxSeqNoOfNextSafeCommit = Long.parseLong(commits.get(keptPosition + 1).getUserData().get(SequenceNumbers.MAX_SEQ_NO)); } + if (commitsListener != null && previousLastCommit != this.lastCommit) { + newCommit = acquireIndexCommit(false); + } for (int i = 0; i < keptPosition; i++) { - if (snapshottedCommits.containsKey(commits.get(i)) == false) { - deleteCommit(commits.get(i)); + final IndexCommit commit = commits.get(i); + if (snapshottedCommits.containsKey(commit) == false) { + deleteCommit(commit); + if (deletedCommits == null) { + deletedCommits = new ArrayList<>(); + } + deletedCommits.add(commit); } } } assert assertSafeCommitUnchanged(safeCommit); + if (commitsListener != null) { + if (newCommit != null) { + final Set additionalFiles = listOfNewFileNames(previousLastCommit, newCommit); + commitsListener.onNewAcquiredCommit(newCommit, additionalFiles); + } + if (deletedCommits != null) { + for (IndexCommit deletedCommit : deletedCommits) { + commitsListener.onDeletedCommit(deletedCommit); + } + } + } } private boolean assertSafeCommitUnchanged(IndexCommit safeCommit) { @@ -154,7 +195,11 @@ synchronized IndexCommit acquireIndexCommit(boolean acquiringSafeCommit) { assert lastCommit != null : "Last commit is not initialized yet"; final IndexCommit snapshotting = acquiringSafeCommit ? safeCommit : lastCommit; snapshottedCommits.merge(snapshotting, 1, Integer::sum); // increase refCount - return new SnapshotIndexCommit(snapshotting); + return wrapCommit(snapshotting); + } + + protected IndexCommit wrapCommit(IndexCommit indexCommit) { + return new SnapshotIndexCommit(indexCommit); } /** @@ -226,6 +271,11 @@ private static int indexOfKeptCommits(List commits, long return 0; } + private Set listOfNewFileNames(IndexCommit previous, IndexCommit current) throws IOException { + final Set previousFiles = previous != null ? 
new HashSet<>(previous.getFileNames()) : Set.of(); + return current.getFileNames().stream().filter(f -> previousFiles.contains(f) == false).collect(Collectors.toUnmodifiableSet()); + } + /** * Checks whether the deletion policy is holding on to snapshotted commits */ diff --git a/server/src/main/java/org/elasticsearch/index/engine/CompletionStatsCache.java b/server/src/main/java/org/elasticsearch/index/engine/CompletionStatsCache.java index 03096c88f0c9..f66b85647189 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CompletionStatsCache.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CompletionStatsCache.java @@ -25,7 +25,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; -class CompletionStatsCache implements ReferenceManager.RefreshListener { +public class CompletionStatsCache implements ReferenceManager.RefreshListener { private final Supplier searcherSupplier; @@ -37,11 +37,11 @@ class CompletionStatsCache implements ReferenceManager.RefreshListener { */ private final AtomicReference> completionStatsFutureRef = new AtomicReference<>(); - CompletionStatsCache(Supplier searcherSupplier) { + public CompletionStatsCache(Supplier searcherSupplier) { this.searcherSupplier = searcherSupplier; } - CompletionStats get(String... fieldNamePatterns) { + public CompletionStats get(String... fieldNamePatterns) { final PlainActionFuture newFuture = new PlainActionFuture<>(); final PlainActionFuture oldFuture = completionStatsFutureRef.compareAndExchange(null, newFuture); diff --git a/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchReaderManager.java b/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchReaderManager.java index f5041ec69ebd..ef3b402a74c8 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchReaderManager.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchReaderManager.java @@ -25,7 +25,7 @@ * */ @SuppressForbidden(reason = "reference counting is required here") -class ElasticsearchReaderManager extends ReferenceManager { +public class ElasticsearchReaderManager extends ReferenceManager { /** * Creates and returns a new ElasticsearchReaderManager from the given @@ -34,7 +34,7 @@ class ElasticsearchReaderManager extends ReferenceManager additionalFiles); + + /** + * This method is invoked after the policy deleted the given {@link IndexCommit}. A listener is never notified of a deleted commit + * until the corresponding {@link Engine.IndexCommitRef} received through {@link #onNewCommit} has been closed; closing which in + * turn can call this method directly. 
+ * + * @param shardId the {@link ShardId} of shard + * @param deletedCommit the deleted {@link IndexCommit} + */ + void onIndexCommitDelete(ShardId shardId, IndexCommit deletedCommit); + } + /** * A throttling class that can be activated, causing the * {@code acquireThrottle} method to block on a lock when throttling @@ -541,7 +569,7 @@ public boolean isFound() { public static class NoOpResult extends Result { - NoOpResult(long term, long seqNo) { + public NoOpResult(long term, long seqNo) { super(Operation.TYPE.NO_OP, 0, term, seqNo, null); } @@ -557,7 +585,7 @@ protected final GetResult getFromSearcher(Get get, Engine.Searcher searcher, boo if (uncachedLookup) { docIdAndVersion = VersionsAndSeqNoResolver.loadDocIdAndVersionUncached(searcher.getIndexReader(), get.uid(), true); } else { - docIdAndVersion = VersionsAndSeqNoResolver.loadDocIdAndVersion(searcher.getIndexReader(), get.uid(), true); + docIdAndVersion = VersionsAndSeqNoResolver.timeSeriesLoadDocIdAndVersion(searcher.getIndexReader(), get.uid(), true); } } catch (Exception e) { Releasables.closeWhileHandlingException(searcher); @@ -986,16 +1014,14 @@ public boolean refreshNeeded() { * changes. */ @Nullable - public abstract void refresh(String source) throws EngineException; + public abstract RefreshResult refresh(String source) throws EngineException; /** * Synchronously refreshes the engine for new search operations to reflect the latest * changes unless another thread is already refreshing the engine concurrently. - * - * @return true if the a refresh happened. Otherwise false */ @Nullable - public abstract boolean maybeRefresh(String source) throws EngineException; + public abstract RefreshResult maybeRefresh(String source) throws EngineException; /** * Called when our engine is using too much heap and should move buffered indexed/deleted documents to disk. @@ -1929,4 +1955,27 @@ public interface TranslogRecoveryRunner { public final EngineConfig getEngineConfig() { return engineConfig; } + + /** + * Allows registering a listener for when the index shard is on a segment generation >= minGeneration. + */ + public void addSegmentGenerationListener(long minGeneration, ActionListener listener) { + throw new UnsupportedOperationException(); + } + + /** + * Captures the result of a refresh operation on the index shard. + *
    + * refreshed is true if a refresh happened. If refreshed, generation + * contains the generation of the index commit that the reader has opened upon refresh. + */ + public record RefreshResult(boolean refreshed, long generation) { + + public static final long UNKNOWN_GENERATION = -1L; + public static final RefreshResult NO_REFRESH = new RefreshResult(false); + + public RefreshResult(boolean refreshed) { + this(refreshed, UNKNOWN_GENERATION); + } + } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index e8a2bf0aa58d..940ddf1373e5 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -125,6 +125,11 @@ public Supplier retentionLeasesSupplier() { private final LongSupplier relativeTimeInNanosSupplier; + @Nullable + private final Engine.IndexCommitListener indexCommitListener; + + private final boolean promotableToPrimary; + /** * Creates a new {@link org.elasticsearch.index.engine.EngineConfig} */ @@ -152,7 +157,9 @@ public EngineConfig( LongSupplier primaryTermSupplier, IndexStorePlugin.SnapshotCommitSupplier snapshotCommitSupplier, Comparator leafSorter, - LongSupplier relativeTimeInNanosSupplier + LongSupplier relativeTimeInNanosSupplier, + Engine.IndexCommitListener indexCommitListener, + boolean promotableToPrimary ) { this.shardId = shardId; this.indexSettings = indexSettings; @@ -193,6 +200,8 @@ public EngineConfig( this.snapshotCommitSupplier = snapshotCommitSupplier; this.leafSorter = leafSorter; this.relativeTimeInNanosSupplier = relativeTimeInNanosSupplier; + this.indexCommitListener = indexCommitListener; + this.promotableToPrimary = promotableToPrimary; } /** @@ -236,6 +245,13 @@ public Codec getCodec() { return codecService.codec(codecName); } + /** + * @return the {@link CodecService} + */ + public CodecService getCodecService() { + return codecService; + } + /** * Returns a thread-pool mainly used to get estimated time stamps from * {@link org.elasticsearch.threadpool.ThreadPool#relativeTimeInMillis()} and to schedule @@ -395,4 +411,16 @@ public Comparator getLeafSorter() { public LongSupplier getRelativeTimeInNanosSupplier() { return relativeTimeInNanosSupplier; } + + @Nullable + public Engine.IndexCommitListener getIndexCommitListener() { + return indexCommitListener; + } + + /** + * @return whether the engine should be configured so that it can be promoted to primary in future + */ + public boolean isPromotableToPrimary() { + return promotableToPrimary; + } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/IdStoredFieldLoader.java b/server/src/main/java/org/elasticsearch/index/engine/IdStoredFieldLoader.java index 734678417c6b..61f9f2c0f24d 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/IdStoredFieldLoader.java +++ b/server/src/main/java/org/elasticsearch/index/engine/IdStoredFieldLoader.java @@ -35,7 +35,7 @@ final class IdStoredFieldLoader { private static CheckedBiConsumer getStoredFieldsReader(LeafReader in) { if (in instanceof SequentialStoredFieldsLeafReader) { - return (((SequentialStoredFieldsLeafReader) in).getSequentialStoredFieldsReader())::visitDocument; + return (((SequentialStoredFieldsLeafReader) in).getSequentialStoredFieldsReader())::document; } throw new IllegalArgumentException("Requires a SequentialStoredFieldsReader, got " + in.getClass()); } diff --git 
a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index a3cd492918ff..84696f339e8f 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -42,6 +42,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.lucene.LoggerInfoStream; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; @@ -59,6 +60,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.cache.query.TrivialQueryCachingPolicy; @@ -105,6 +107,7 @@ import java.util.function.Function; import java.util.function.LongConsumer; import java.util.function.LongSupplier; +import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -226,7 +229,8 @@ public InternalEngine(EngineConfig engineConfig) { logger, translogDeletionPolicy, softDeletesPolicy, - translog::getLastSyncedGlobalCheckpoint + translog::getLastSyncedGlobalCheckpoint, + newCommitsListener() ); this.localCheckpointTracker = createLocalCheckpointTracker(localCheckpointTrackerSupplier); writer = createWriter(); @@ -322,6 +326,28 @@ private SoftDeletesPolicy newSoftDeletesPolicy() throws IOException { ); } + @Nullable + private CombinedDeletionPolicy.CommitsListener newCommitsListener() { + final Engine.IndexCommitListener listener = engineConfig.getIndexCommitListener(); + if (listener != null) { + var primaryTerm = config().getPrimaryTermSupplier().getAsLong(); + return new CombinedDeletionPolicy.CommitsListener() { + @Override + public void onNewAcquiredCommit(final IndexCommit commit, final Set additionalFiles) { + final IndexCommitRef indexCommitRef = acquireIndexCommitRef(() -> commit); + assert indexCommitRef.getIndexCommit() == commit; + listener.onNewCommit(shardId, store, primaryTerm, indexCommitRef, additionalFiles); + } + + @Override + public void onDeletedCommit(IndexCommit commit) { + listener.onIndexCommitDelete(shardId, commit); + } + }; + } + return null; + } + @Override public CompletionStats completionStats(String... 
fieldNamePatterns) { return completionStatsCache.get(fieldNamePatterns); @@ -812,7 +838,21 @@ private VersionValue resolveDocVersion(final Operation op, boolean loadSeqNo) th assert incrementIndexVersionLookup(); // used for asserting in tests final VersionsAndSeqNoResolver.DocIdAndVersion docIdAndVersion; try (Searcher searcher = acquireSearcher("load_version", SearcherScope.INTERNAL)) { - docIdAndVersion = VersionsAndSeqNoResolver.loadDocIdAndVersion(searcher.getIndexReader(), op.uid(), loadSeqNo); + if (engineConfig.getIndexSettings().getMode() == IndexMode.TIME_SERIES) { + assert engineConfig.getLeafSorter() == DataStream.TIMESERIES_LEAF_READERS_SORTER; + docIdAndVersion = VersionsAndSeqNoResolver.timeSeriesLoadDocIdAndVersion( + searcher.getIndexReader(), + op.uid(), + op.id(), + loadSeqNo + ); + } else { + docIdAndVersion = VersionsAndSeqNoResolver.timeSeriesLoadDocIdAndVersion( + searcher.getIndexReader(), + op.uid(), + loadSeqNo + ); + } } if (docIdAndVersion != null) { versionValue = new IndexVersionValue(null, docIdAndVersion.version, docIdAndVersion.seqNo, docIdAndVersion.primaryTerm); @@ -1792,20 +1832,21 @@ protected Optional preFlightCheckForNoOp(final NoOp noOp) throws IOEx } @Override - public void refresh(String source) throws EngineException { - refresh(source, SearcherScope.EXTERNAL, true); + public RefreshResult refresh(String source) throws EngineException { + return refresh(source, SearcherScope.EXTERNAL, true); } @Override - public boolean maybeRefresh(String source) throws EngineException { + public RefreshResult maybeRefresh(String source) throws EngineException { return refresh(source, SearcherScope.EXTERNAL, false); } - final boolean refresh(String source, SearcherScope scope, boolean block) throws EngineException { + final RefreshResult refresh(String source, SearcherScope scope, boolean block) throws EngineException { // both refresh types will result in an internal refresh but only the external will also // pass the new reader reference to the external reader manager. final long localCheckpointBeforeRefresh = localCheckpointTracker.getProcessedCheckpoint(); boolean refreshed; + long segmentGeneration = RefreshResult.UNKNOWN_GENERATION; try { // refresh does not need to hold readLock as ReferenceManager can handle correctly if the engine is closed in mid-way. if (store.tryIncRef()) { @@ -1821,6 +1862,14 @@ final boolean refresh(String source, SearcherScope scope, boolean block) throws } else { refreshed = referenceManager.maybeRefresh(); } + if (refreshed) { + final ElasticsearchDirectoryReader current = referenceManager.acquire(); + try { + segmentGeneration = current.getIndexCommit().getGeneration(); + } finally { + referenceManager.release(current); + } + } } finally { store.decRef(); } @@ -1852,7 +1901,7 @@ final boolean refresh(String source, SearcherScope scope, boolean block) throws // for a long time: maybePruneDeletes(); mergeScheduler.refreshConfig(); - return refreshed; + return new RefreshResult(refreshed, segmentGeneration); } @Override @@ -1928,10 +1977,9 @@ public boolean flush(boolean force, boolean waitIfOngoing) throws EngineExceptio // newly created commit points to a different translog generation (can free translog), // or (4) the local checkpoint information in the last commit is stale, which slows down future recoveries. 
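Illustrative sketch (not part of this diff): how a caller might consume the RefreshResult now returned by refresh/maybeRefresh. It assumes the erased listener type is ActionListener<Long>, that engine and logger are in scope, and that the engine overrides addSegmentGenerationListener, since the base Engine implementation throws UnsupportedOperationException.

Engine.RefreshResult result = engine.maybeRefresh("example");
if (result.refreshed() && result.generation() != Engine.RefreshResult.UNKNOWN_GENERATION) {
    // result.generation() is the generation of the index commit the refreshed reader was opened on;
    // wait until searchers are guaranteed to reflect at least that generation.
    engine.addSegmentGenerationListener(result.generation(), ActionListener.wrap(
        generation -> logger.debug("segment generation [{}] is now visible to searches", generation),
        e -> logger.warn("segment generation listener failed", e)
    ));
}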
boolean hasUncommittedChanges = indexWriter.hasUncommittedChanges(); - boolean shouldPeriodicallyFlush = shouldPeriodicallyFlush(); if (hasUncommittedChanges || force - || shouldPeriodicallyFlush + || shouldPeriodicallyFlush() || getProcessedLocalCheckpoint() > Long.parseLong( lastCommittedSegmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY) )) { @@ -1941,15 +1989,6 @@ public boolean flush(boolean force, boolean waitIfOngoing) throws EngineExceptio logger.trace("starting commit for flush; commitTranslog=true"); commitIndexWriter(indexWriter, translog); logger.trace("finished commit for flush"); - - // a temporary debugging to investigate test failure - issue#32827. Remove when the issue is resolved - logger.debug( - "new commit on flush, hasUncommittedChanges:{}, force:{}, shouldPeriodicallyFlush:{}", - hasUncommittedChanges, - force, - shouldPeriodicallyFlush - ); - // we need to refresh in order to clear older version values refresh("version_table_flush", SearcherScope.INTERNAL, true); translog.trimUnreferencedReaders(); @@ -2158,22 +2197,14 @@ public void forceMerge(final boolean flush, int maxNumSegments, boolean onlyExpu } } - @Override - public IndexCommitRef acquireLastIndexCommit(final boolean flushFirst) throws EngineException { - // we have to flush outside of the readlock otherwise we might have a problem upgrading - // the to a write lock when we fail the engine in this operation - if (flushFirst) { - logger.trace("start flush for snapshot"); - flush(false, true); - logger.trace("finish flush for snapshot"); - } + private IndexCommitRef acquireIndexCommitRef(final Supplier indexCommitSupplier) { store.incRef(); boolean success = false; try { - final IndexCommit lastCommit = combinedDeletionPolicy.acquireIndexCommit(false); + final IndexCommit indexCommit = indexCommitSupplier.get(); final IndexCommitRef commitRef = new IndexCommitRef( - lastCommit, - () -> IOUtils.close(() -> releaseIndexCommit(lastCommit), store::decRef) + indexCommit, + () -> IOUtils.close(() -> releaseIndexCommit(indexCommit), store::decRef) ); success = true; return commitRef; @@ -2185,22 +2216,20 @@ public IndexCommitRef acquireLastIndexCommit(final boolean flushFirst) throws En } @Override - public IndexCommitRef acquireSafeIndexCommit() throws EngineException { - store.incRef(); - boolean success = false; - try { - final IndexCommit safeCommit = combinedDeletionPolicy.acquireIndexCommit(true); - final IndexCommitRef commitRef = new IndexCommitRef( - safeCommit, - () -> IOUtils.close(() -> releaseIndexCommit(safeCommit), store::decRef) - ); - success = true; - return commitRef; - } finally { - if (success == false) { - store.decRef(); - } + public IndexCommitRef acquireLastIndexCommit(final boolean flushFirst) throws EngineException { + // we have to flush outside of the readlock otherwise we might have a problem upgrading + // the to a write lock when we fail the engine in this operation + if (flushFirst) { + logger.trace("start flush for snapshot"); + flush(false, true); + logger.trace("finish flush for snapshot"); } + return acquireIndexCommitRef(() -> combinedDeletionPolicy.acquireIndexCommit(false)); + } + + @Override + public IndexCommitRef acquireSafeIndexCommit() throws EngineException { + return acquireIndexCommitRef(() -> combinedDeletionPolicy.acquireIndexCommit(true)); } private void releaseIndexCommit(IndexCommit snapshot) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java 
b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java index f673b693df6c..b81709aae8cb 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java @@ -338,7 +338,7 @@ private Translog.Operation readDocAsOp(int docIndex) throws IOException { assert singleConsumer : "Sequential access optimization must not be enabled for multiple consumers"; assert parallelArray.useSequentialStoredFieldsReader; assert storedFieldsReaderOrd == leaf.ord : storedFieldsReaderOrd + " != " + leaf.ord; - storedFieldsReader.visitDocument(segmentDocID, fields); + storedFieldsReader.document(segmentDocID, fields); } else { leaf.reader().document(segmentDocID, fields); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/NoOpEngine.java b/server/src/main/java/org/elasticsearch/index/engine/NoOpEngine.java index b4d6c5ebaa00..e8ae9d605b3f 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/NoOpEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/NoOpEngine.java @@ -17,12 +17,15 @@ import org.apache.lucene.store.Directory; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.util.concurrent.ReleasableLock; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.index.translog.TranslogDeletionPolicy; +import org.elasticsearch.index.translog.TranslogStats; import java.io.IOException; import java.io.UncheckedIOException; @@ -43,7 +46,21 @@ public final class NoOpEngine extends ReadOnlyEngine { private final DocsStats docsStats; public NoOpEngine(EngineConfig config) { - super(config, null, null, true, Function.identity(), true, true); + this( + config, + config.isPromotableToPrimary() ? null : new TranslogStats(0, 0, 0, 0, 0), + config.isPromotableToPrimary() + ? 
null + : new SeqNoStats( + config.getGlobalCheckpointSupplier().getAsLong(), + config.getGlobalCheckpointSupplier().getAsLong(), + config.getGlobalCheckpointSupplier().getAsLong() + ) + ); + } + + public NoOpEngine(EngineConfig config, @Nullable TranslogStats translogStats, SeqNoStats seqNoStats) { + super(config, seqNoStats, translogStats, true, Function.identity(), true, true); this.segmentsStats = new SegmentsStats(); Directory directory = store.directory(); try (DirectoryReader reader = openDirectory(directory)) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index e47c3cf984c2..7d66c96bd4c1 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -418,14 +418,15 @@ public List segments() { } @Override - public void refresh(String source) { + public RefreshResult refresh(String source) { // we could allow refreshes if we want down the road the reader manager will then reflect changes to a rw-engine // opened side-by-side + return RefreshResult.NO_REFRESH; } @Override - public boolean maybeRefresh(String source) throws EngineException { - return false; + public RefreshResult maybeRefresh(String source) throws EngineException { + return RefreshResult.NO_REFRESH; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java index 1a0b8531d7fb..05c5031edbdd 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java @@ -195,8 +195,8 @@ public void close() throws IOException { } @Override - public void visitDocument(int docID, StoredFieldVisitor visitor) throws IOException { - in.visitDocument(docID, visitor); + public void document(int docID, StoredFieldVisitor visitor) throws IOException { + in.document(docID, visitor); } @Override @@ -220,11 +220,11 @@ private static class RecoverySourcePruningStoredFieldsReader extends FilterStore } @Override - public void visitDocument(int docID, StoredFieldVisitor visitor) throws IOException { + public void document(int docID, StoredFieldVisitor visitor) throws IOException { if (recoverySourceToKeep != null && recoverySourceToKeep.get(docID)) { - super.visitDocument(docID, visitor); + super.document(docID, visitor); } else { - super.visitDocument(docID, new FilterStoredFieldVisitor(visitor) { + super.document(docID, new FilterStoredFieldVisitor(visitor) { @Override public Status needsField(FieldInfo fieldInfo) throws IOException { if (recoverySourceField.equals(fieldInfo.name)) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/Segment.java b/server/src/main/java/org/elasticsearch/index/engine/Segment.java index 8bff90cf5856..88dbe7d454a3 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Segment.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Segment.java @@ -14,7 +14,7 @@ import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSelector; import org.apache.lucene.search.SortedSetSortField; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; 
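Illustrative sketch (not part of this diff): the Segment, SegmentsStats and GetResult hunks below replace node Version checks with TransportVersion checks when gating the wire format. The general pattern, shown here with hypothetical helper methods, is:

void writeLegacyPlaceholder(StreamOutput out) throws IOException {
    if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) {
        out.writeLong(0L); // pre-8.0 readers still expect the removed field on the wire
    }
}

void readLegacyPlaceholder(StreamInput in) throws IOException {
    if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) {
        in.readLong(); // discard the placeholder written by older nodes
    }
}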
import org.elasticsearch.common.io.stream.Writeable; @@ -52,7 +52,7 @@ public Segment(StreamInput in) throws IOException { version = Lucene.parseVersionLenient(in.readOptionalString(), null); compound = in.readOptionalBoolean(); mergeId = in.readOptionalString(); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { in.readLong(); // memoryInBytes } if (in.readBoolean()) { @@ -159,7 +159,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(version.toString()); out.writeOptionalBoolean(compound); out.writeOptionalString(mergeId); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeLong(0); // memoryInBytes } @@ -252,7 +252,7 @@ private static void writeSegmentSort(StreamOutput out, Sort sort) throws IOExcep o.writeBoolean(((SortedNumericSortField) field).getSelector() == SortedNumericSelector.Type.MAX); o.writeBoolean(field.getReverse()); } else if (field.getType().equals(SortField.Type.STRING)) { - if (o.getVersion().before(Version.V_8_5_0)) { + if (o.getTransportVersion().before(TransportVersion.V_8_5_0)) { // The closest supported version before 8.5.0 was SortedSet fields, so we mimic that o.writeByte(SORT_STRING_SET); o.writeOptionalBoolean(field.getMissingValue() == null ? null : field.getMissingValue() == SortField.STRING_FIRST); diff --git a/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java b/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java index 321effc55730..5a2edcb05978 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java +++ b/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.index.engine; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -39,7 +39,7 @@ public SegmentsStats() { public SegmentsStats(StreamInput in) throws IOException { count = in.readVLong(); - if (in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { in.readLong(); // memoryInBytes in.readLong(); // termsMemoryInBytes in.readLong(); // storedFieldsMemoryInBytes @@ -220,7 +220,7 @@ static final class Fields { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(count); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeLong(0L); // memoryInBytes out.writeLong(0L); // termsMemoryInBytes out.writeLong(0L); // storedFieldsMemoryInBytes @@ -250,7 +250,7 @@ public static class FileStats implements Writeable, ToXContentFragment { private final long max; FileStats(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_7_13_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_13_0)) { this.ext = in.readString(); this.total = in.readVLong(); this.count = in.readVLong(); @@ -295,7 +295,7 @@ public long getMax() { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_7_13_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_13_0)) { out.writeString(ext); out.writeVLong(total); out.writeVLong(count); diff --git 
a/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java index c377fdd73f3d..c6ac89f1a8e5 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java +++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java @@ -11,11 +11,13 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.index.BaseTermsEnum; import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.ByteVectorValues; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldInfos; import org.apache.lucene.index.Fields; +import org.apache.lucene.index.FloatVectorValues; import org.apache.lucene.index.ImpactsEnum; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexOptions; @@ -30,11 +32,12 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.index.StoredFields; +import org.apache.lucene.index.TermVectors; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.VectorEncoding; import org.apache.lucene.index.VectorSimilarityFunction; -import org.apache.lucene.index.VectorValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.ByteBuffersDirectory; @@ -345,8 +348,13 @@ public NumericDocValues getNormValues(String field) throws IOException { } @Override - public VectorValues getVectorValues(String field) throws IOException { - return getDelegate().getVectorValues(field); + public FloatVectorValues getFloatVectorValues(String field) throws IOException { + return getDelegate().getFloatVectorValues(field); + } + + @Override + public ByteVectorValues getByteVectorValues(String field) throws IOException { + return getDelegate().getByteVectorValues(field); } @Override @@ -354,6 +362,11 @@ public TopDocs searchNearestVectors(String field, float[] target, int k, Bits ac return getDelegate().searchNearestVectors(field, target, k, acceptDocs, visitedLimit); } + @Override + public TopDocs searchNearestVectors(String field, byte[] target, int k, Bits acceptDocs, int visitedLimit) throws IOException { + return getDelegate().searchNearestVectors(field, target, k, acceptDocs, visitedLimit); + } + @Override public FieldInfos getFieldInfos() { return getDelegate().getFieldInfos(); @@ -382,6 +395,16 @@ public Fields getTermVectors(int docID) throws IOException { return getDelegate().getTermVectors(docID); } + @Override + public TermVectors termVectors() throws IOException { + return getDelegate().termVectors(); + } + + @Override + public StoredFields storedFields() throws IOException { + return getDelegate().storedFields(); + } + @Override public int numDocs() { return 1; diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherIndexFieldData.java index 8d24977a9c22..47f47a763e5b 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherIndexFieldData.java @@ -17,7 +17,7 @@ import 
org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.lookup.SourceLookup; +import org.elasticsearch.search.lookup.SourceProvider; import org.elasticsearch.search.sort.BucketedSort; import org.elasticsearch.search.sort.SortOrder; @@ -30,20 +30,20 @@ public abstract static class Builder implements IndexFieldData.Builder { protected final String fieldName; protected final ValuesSourceType valuesSourceType; protected final ValueFetcher valueFetcher; - protected final SourceLookup sourceLookup; + protected final SourceProvider sourceProvider; protected final ToScriptFieldFactory toScriptFieldFactory; public Builder( String fieldName, ValuesSourceType valuesSourceType, ValueFetcher valueFetcher, - SourceLookup sourceLookup, + SourceProvider sourceProvider, ToScriptFieldFactory toScriptFieldFactory ) { this.fieldName = fieldName; this.valuesSourceType = valuesSourceType; this.valueFetcher = valueFetcher; - this.sourceLookup = sourceLookup; + this.sourceProvider = sourceProvider; this.toScriptFieldFactory = toScriptFieldFactory; } } @@ -51,20 +51,20 @@ public Builder( protected final String fieldName; protected final ValuesSourceType valuesSourceType; protected final ValueFetcher valueFetcher; - protected final SourceLookup sourceLookup; + protected final SourceProvider sourceProvider; protected final ToScriptFieldFactory toScriptFieldFactory; protected SourceValueFetcherIndexFieldData( String fieldName, ValuesSourceType valuesSourceType, ValueFetcher valueFetcher, - SourceLookup sourceLookup, + SourceProvider sourceProvider, ToScriptFieldFactory toScriptFieldFactory ) { this.fieldName = fieldName; this.valuesSourceType = valuesSourceType; this.valueFetcher = valueFetcher; - this.sourceLookup = sourceLookup; + this.sourceProvider = sourceProvider; this.toScriptFieldFactory = toScriptFieldFactory; } @@ -112,18 +112,18 @@ public abstract static class SourceValueFetcherLeafFieldData implements LeafF protected final LeafReaderContext leafReaderContext; protected final ValueFetcher valueFetcher; - protected final SourceLookup sourceLookup; + protected final SourceProvider sourceProvider; public SourceValueFetcherLeafFieldData( ToScriptFieldFactory toScriptFieldFactory, LeafReaderContext leafReaderContext, ValueFetcher valueFetcher, - SourceLookup sourceLookup + SourceProvider sourceProvider ) { this.toScriptFieldFactory = toScriptFieldFactory; this.leafReaderContext = leafReaderContext; this.valueFetcher = valueFetcher; - this.sourceLookup = sourceLookup; + this.sourceProvider = sourceProvider; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherMultiGeoPointIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherMultiGeoPointIndexFieldData.java index 0d79eb51d043..efa99c5d3f9f 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherMultiGeoPointIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherMultiGeoPointIndexFieldData.java @@ -15,7 +15,8 @@ import org.elasticsearch.script.field.DocValuesScriptFieldFactory; import org.elasticsearch.script.field.ToScriptFieldFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.lookup.SourceLookup; +import org.elasticsearch.search.lookup.Source; +import org.elasticsearch.search.lookup.SourceProvider; import 
java.io.IOException; import java.util.Collections; @@ -30,10 +31,10 @@ public Builder( String fieldName, ValuesSourceType valuesSourceType, ValueFetcher valueFetcher, - SourceLookup sourceLookup, + SourceProvider sourceProvider, ToScriptFieldFactory toScriptFieldFactory ) { - super(fieldName, valuesSourceType, valueFetcher, sourceLookup, toScriptFieldFactory); + super(fieldName, valuesSourceType, valueFetcher, sourceProvider, toScriptFieldFactory); } @Override @@ -42,7 +43,7 @@ public SourceValueFetcherMultiGeoPointIndexFieldData build(IndexFieldDataCache c fieldName, valuesSourceType, valueFetcher, - sourceLookup, + sourceProvider, toScriptFieldFactory ); } @@ -52,15 +53,15 @@ protected SourceValueFetcherMultiGeoPointIndexFieldData( String fieldName, ValuesSourceType valuesSourceType, ValueFetcher valueFetcher, - SourceLookup sourceLookup, + SourceProvider sourceProvider, ToScriptFieldFactory toScriptFieldFactory ) { - super(fieldName, valuesSourceType, valueFetcher, sourceLookup, toScriptFieldFactory); + super(fieldName, valuesSourceType, valueFetcher, sourceProvider, toScriptFieldFactory); } @Override public SourceValueFetcherMultiGeoPointLeafFieldData loadDirect(LeafReaderContext context) throws Exception { - return new SourceValueFetcherMultiGeoPointLeafFieldData(toScriptFieldFactory, context, valueFetcher, sourceLookup); + return new SourceValueFetcherMultiGeoPointLeafFieldData(toScriptFieldFactory, context, valueFetcher, sourceProvider); } public static class SourceValueFetcherMultiGeoPointLeafFieldData extends @@ -70,15 +71,15 @@ public SourceValueFetcherMultiGeoPointLeafFieldData( ToScriptFieldFactory toScriptFieldFactory, LeafReaderContext leafReaderContext, ValueFetcher valueFetcher, - SourceLookup sourceLookup + SourceProvider sourceProvider ) { - super(toScriptFieldFactory, leafReaderContext, valueFetcher, sourceLookup); + super(toScriptFieldFactory, leafReaderContext, valueFetcher, sourceProvider); } @Override public DocValuesScriptFieldFactory getScriptFieldFactory(String name) { return toScriptFieldFactory.getScriptFieldFactory( - new MultiGeoPointValues(new SourceValueFetcherMultiGeoPointDocValues(leafReaderContext, valueFetcher, sourceLookup)), + new MultiGeoPointValues(new SourceValueFetcherMultiGeoPointDocValues(leafReaderContext, valueFetcher, sourceProvider)), name ); } @@ -90,18 +91,17 @@ public static class SourceValueFetcherMultiGeoPointDocValues extends public SourceValueFetcherMultiGeoPointDocValues( LeafReaderContext leafReaderContext, ValueFetcher valueFetcher, - SourceLookup sourceLookup + SourceProvider sourceProvider ) { - super(leafReaderContext, valueFetcher, sourceLookup); + super(leafReaderContext, valueFetcher, sourceProvider); } @Override @SuppressWarnings("unchecked") public boolean advanceExact(int doc) throws IOException { - sourceLookup.setSegmentAndDocument(leafReaderContext, doc); values.clear(); - - for (Object value : valueFetcher.fetchValues(sourceLookup, doc, Collections.emptyList())) { + Source source = sourceProvider.getSource(leafReaderContext, doc); + for (Object value : valueFetcher.fetchValues(source, doc, Collections.emptyList())) { assert value instanceof Map && ((Map) value).get("coordinates") instanceof List; List coordinates = ((Map>) value).get("coordinates"); assert coordinates.size() == 2 && coordinates.get(1) instanceof Number && coordinates.get(0) instanceof Number; diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBinaryIndexFieldData.java 
b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBinaryIndexFieldData.java index 6ba6a15cde11..cc47e796008c 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBinaryIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBinaryIndexFieldData.java @@ -15,7 +15,8 @@ import org.elasticsearch.script.field.DocValuesScriptFieldFactory; import org.elasticsearch.script.field.ToScriptFieldFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.lookup.SourceLookup; +import org.elasticsearch.search.lookup.Source; +import org.elasticsearch.search.lookup.SourceProvider; import java.io.IOException; import java.util.Collections; @@ -31,10 +32,10 @@ public Builder( String fieldName, ValuesSourceType valuesSourceType, ValueFetcher valueFetcher, - SourceLookup sourceLookup, + SourceProvider sourceProvider, ToScriptFieldFactory toScriptFieldFactory ) { - super(fieldName, valuesSourceType, valueFetcher, sourceLookup, toScriptFieldFactory); + super(fieldName, valuesSourceType, valueFetcher, sourceProvider, toScriptFieldFactory); } @Override @@ -43,7 +44,7 @@ public SourceValueFetcherSortedBinaryIndexFieldData build(IndexFieldDataCache ca fieldName, valuesSourceType, valueFetcher, - sourceLookup, + sourceProvider, toScriptFieldFactory ); } @@ -53,15 +54,15 @@ protected SourceValueFetcherSortedBinaryIndexFieldData( String fieldName, ValuesSourceType valuesSourceType, ValueFetcher valueFetcher, - SourceLookup sourceLookup, + SourceProvider sourceProvider, ToScriptFieldFactory toScriptFieldFactory ) { - super(fieldName, valuesSourceType, valueFetcher, sourceLookup, toScriptFieldFactory); + super(fieldName, valuesSourceType, valueFetcher, sourceProvider, toScriptFieldFactory); } @Override public SourceValueFetcherSortedBinaryLeafFieldData loadDirect(LeafReaderContext context) throws Exception { - return new SourceValueFetcherSortedBinaryLeafFieldData(toScriptFieldFactory, context, valueFetcher, sourceLookup); + return new SourceValueFetcherSortedBinaryLeafFieldData(toScriptFieldFactory, context, valueFetcher, sourceProvider); } public static class SourceValueFetcherSortedBinaryLeafFieldData extends SourceValueFetcherLeafFieldData { @@ -70,15 +71,15 @@ public SourceValueFetcherSortedBinaryLeafFieldData( ToScriptFieldFactory toScriptFieldFactory, LeafReaderContext leafReaderContext, ValueFetcher valueFetcher, - SourceLookup sourceLookup + SourceProvider sourceProvider ) { - super(toScriptFieldFactory, leafReaderContext, valueFetcher, sourceLookup); + super(toScriptFieldFactory, leafReaderContext, valueFetcher, sourceProvider); } @Override public DocValuesScriptFieldFactory getScriptFieldFactory(String name) { return toScriptFieldFactory.getScriptFieldFactory( - new SourceValueFetcherSortedBinaryDocValues(leafReaderContext, valueFetcher, sourceLookup), + new SourceValueFetcherSortedBinaryDocValues(leafReaderContext, valueFetcher, sourceProvider), name ); } @@ -89,7 +90,7 @@ public static class SourceValueFetcherSortedBinaryDocValues extends SortedBinary private final LeafReaderContext leafReaderContext; private final ValueFetcher valueFetcher; - private final SourceLookup sourceLookup; + private final SourceProvider sourceProvider; private final SortedSet values; private Iterator iterator; @@ -97,21 +98,20 @@ public static class SourceValueFetcherSortedBinaryDocValues extends SortedBinary public SourceValueFetcherSortedBinaryDocValues( 
LeafReaderContext leafReaderContext, ValueFetcher valueFetcher, - SourceLookup sourceLookup + SourceProvider sourceProvider ) { this.leafReaderContext = leafReaderContext; this.valueFetcher = valueFetcher; - this.sourceLookup = sourceLookup; + this.sourceProvider = sourceProvider; values = new TreeSet<>(); } @Override public boolean advanceExact(int doc) throws IOException { - sourceLookup.setSegmentAndDocument(leafReaderContext, doc); values.clear(); - - for (Object object : valueFetcher.fetchValues(sourceLookup, doc, Collections.emptyList())) { + Source source = sourceProvider.getSource(leafReaderContext, doc); + for (Object object : valueFetcher.fetchValues(source, doc, Collections.emptyList())) { values.add(new BytesRef(object.toString())); } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBooleanIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBooleanIndexFieldData.java index 7fec75ce7b64..5cf486a35274 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBooleanIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBooleanIndexFieldData.java @@ -15,7 +15,8 @@ import org.elasticsearch.script.field.DocValuesScriptFieldFactory; import org.elasticsearch.script.field.ToScriptFieldFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.lookup.SourceLookup; +import org.elasticsearch.search.lookup.Source; +import org.elasticsearch.search.lookup.SourceProvider; import java.io.IOException; import java.util.Collections; @@ -28,10 +29,10 @@ public Builder( String fieldName, ValuesSourceType valuesSourceType, ValueFetcher valueFetcher, - SourceLookup sourceLookup, + SourceProvider sourceProvider, ToScriptFieldFactory toScriptFieldFactory ) { - super(fieldName, valuesSourceType, valueFetcher, sourceLookup, toScriptFieldFactory); + super(fieldName, valuesSourceType, valueFetcher, sourceProvider, toScriptFieldFactory); } @Override @@ -40,7 +41,7 @@ public SourceValueFetcherSortedBooleanIndexFieldData build(IndexFieldDataCache c fieldName, valuesSourceType, valueFetcher, - sourceLookup, + sourceProvider, toScriptFieldFactory ); } @@ -50,15 +51,15 @@ protected SourceValueFetcherSortedBooleanIndexFieldData( String fieldName, ValuesSourceType valuesSourceType, ValueFetcher valueFetcher, - SourceLookup sourceLookup, + SourceProvider sourceProvider, ToScriptFieldFactory toScriptFieldFactory ) { - super(fieldName, valuesSourceType, valueFetcher, sourceLookup, toScriptFieldFactory); + super(fieldName, valuesSourceType, valueFetcher, sourceProvider, toScriptFieldFactory); } @Override public SourceValueFetcherLeafFieldData loadDirect(LeafReaderContext context) throws Exception { - return new SourceValueFetcherSortedBooleanLeafFieldData(toScriptFieldFactory, context, valueFetcher, sourceLookup); + return new SourceValueFetcherSortedBooleanLeafFieldData(toScriptFieldFactory, context, valueFetcher, sourceProvider); } private static class SourceValueFetcherSortedBooleanLeafFieldData extends SourceValueFetcherLeafFieldData { @@ -67,15 +68,15 @@ private SourceValueFetcherSortedBooleanLeafFieldData( ToScriptFieldFactory toScriptFieldFactory, LeafReaderContext leafReaderContext, ValueFetcher valueFetcher, - SourceLookup sourceLookup + SourceProvider sourceProvider ) { - super(toScriptFieldFactory, leafReaderContext, valueFetcher, sourceLookup); + super(toScriptFieldFactory, 
leafReaderContext, valueFetcher, sourceProvider); } @Override public DocValuesScriptFieldFactory getScriptFieldFactory(String name) { return toScriptFieldFactory.getScriptFieldFactory( - new SourceValueFetcherSortedBooleanDocValues(leafReaderContext, valueFetcher, sourceLookup), + new SourceValueFetcherSortedBooleanDocValues(leafReaderContext, valueFetcher, sourceProvider), name ); } @@ -86,7 +87,7 @@ static class SourceValueFetcherSortedBooleanDocValues extends SortedNumericDocVa private final LeafReaderContext leafReaderContext; private final ValueFetcher valueFetcher; - private final SourceLookup sourceLookup; + private final SourceProvider sourceProvider; private int trueCount; private int falseCount; @@ -95,21 +96,19 @@ static class SourceValueFetcherSortedBooleanDocValues extends SortedNumericDocVa SourceValueFetcherSortedBooleanDocValues( LeafReaderContext leafReaderContext, ValueFetcher valueFetcher, - SourceLookup sourceLookup + SourceProvider sourceProvider ) { this.leafReaderContext = leafReaderContext; this.valueFetcher = valueFetcher; - this.sourceLookup = sourceLookup; + this.sourceProvider = sourceProvider; } @Override public boolean advanceExact(int doc) throws IOException { - sourceLookup.setSegmentAndDocument(leafReaderContext, doc); - trueCount = 0; falseCount = 0; - - for (Object value : valueFetcher.fetchValues(sourceLookup, doc, Collections.emptyList())) { + Source source = sourceProvider.getSource(leafReaderContext, doc); + for (Object value : valueFetcher.fetchValues(source, doc, Collections.emptyList())) { assert value instanceof Boolean; if ((Boolean) value) { ++trueCount; diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedDoubleIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedDoubleIndexFieldData.java index ee3b28bf9641..c1659441f92d 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedDoubleIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedDoubleIndexFieldData.java @@ -14,7 +14,8 @@ import org.elasticsearch.script.field.DocValuesScriptFieldFactory; import org.elasticsearch.script.field.ToScriptFieldFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.lookup.SourceLookup; +import org.elasticsearch.search.lookup.Source; +import org.elasticsearch.search.lookup.SourceProvider; import java.io.IOException; import java.util.ArrayList; @@ -30,10 +31,10 @@ public Builder( String fieldName, ValuesSourceType valuesSourceType, ValueFetcher valueFetcher, - SourceLookup sourceLookup, + SourceProvider sourceProvider, ToScriptFieldFactory toScriptFieldFactory ) { - super(fieldName, valuesSourceType, valueFetcher, sourceLookup, toScriptFieldFactory); + super(fieldName, valuesSourceType, valueFetcher, sourceProvider, toScriptFieldFactory); } @Override @@ -42,7 +43,7 @@ public SourceValueFetcherSortedDoubleIndexFieldData build(IndexFieldDataCache ca fieldName, valuesSourceType, valueFetcher, - sourceLookup, + sourceProvider, toScriptFieldFactory ); } @@ -52,15 +53,15 @@ protected SourceValueFetcherSortedDoubleIndexFieldData( String fieldName, ValuesSourceType valuesSourceType, ValueFetcher valueFetcher, - SourceLookup sourceLookup, + SourceProvider sourceProvider, ToScriptFieldFactory toScriptFieldFactory ) { - super(fieldName, valuesSourceType, valueFetcher, sourceLookup, toScriptFieldFactory); + super(fieldName, valuesSourceType, 
valueFetcher, sourceProvider, toScriptFieldFactory); } @Override public SourceValueFetcherLeafFieldData loadDirect(LeafReaderContext context) throws Exception { - return new SourceValueFetcherSortedDoubleLeafFieldData(toScriptFieldFactory, context, valueFetcher, sourceLookup); + return new SourceValueFetcherSortedDoubleLeafFieldData(toScriptFieldFactory, context, valueFetcher, sourceProvider); } private static class SourceValueFetcherSortedDoubleLeafFieldData extends SourceValueFetcherLeafFieldData { @@ -69,15 +70,15 @@ private SourceValueFetcherSortedDoubleLeafFieldData( ToScriptFieldFactory toScriptFieldFactory, LeafReaderContext leafReaderContext, ValueFetcher valueFetcher, - SourceLookup sourceLookup + SourceProvider sourceProvider ) { - super(toScriptFieldFactory, leafReaderContext, valueFetcher, sourceLookup); + super(toScriptFieldFactory, leafReaderContext, valueFetcher, sourceProvider); } @Override public DocValuesScriptFieldFactory getScriptFieldFactory(String name) { return toScriptFieldFactory.getScriptFieldFactory( - new SourceValueFetcherSortedNumericDoubleValues(leafReaderContext, valueFetcher, sourceLookup), + new SourceValueFetcherSortedNumericDoubleValues(leafReaderContext, valueFetcher, sourceProvider), name ); } @@ -88,7 +89,7 @@ private static class SourceValueFetcherSortedNumericDoubleValues extends SortedN private final LeafReaderContext leafReaderContext; private final ValueFetcher valueFetcher; - private final SourceLookup sourceLookup; + private final SourceProvider sourceProvider; private final List values; private Iterator iterator; @@ -96,21 +97,20 @@ private static class SourceValueFetcherSortedNumericDoubleValues extends SortedN private SourceValueFetcherSortedNumericDoubleValues( LeafReaderContext leafReaderContext, ValueFetcher valueFetcher, - SourceLookup sourceLookup + SourceProvider sourceProvider ) { this.leafReaderContext = leafReaderContext; this.valueFetcher = valueFetcher; - this.sourceLookup = sourceLookup; + this.sourceProvider = sourceProvider; values = new ArrayList<>(); } @Override public boolean advanceExact(int doc) throws IOException { - sourceLookup.setSegmentAndDocument(leafReaderContext, doc); values.clear(); - - for (Object value : valueFetcher.fetchValues(sourceLookup, doc, Collections.emptyList())) { + Source source = sourceProvider.getSource(leafReaderContext, doc); + for (Object value : valueFetcher.fetchValues(source, doc, Collections.emptyList())) { assert value instanceof Number; values.add(((Number) value).doubleValue()); } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedNumericIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedNumericIndexFieldData.java index 37c5198c52ac..f3f9446a42af 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedNumericIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedNumericIndexFieldData.java @@ -15,7 +15,8 @@ import org.elasticsearch.script.field.DocValuesScriptFieldFactory; import org.elasticsearch.script.field.ToScriptFieldFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.lookup.SourceLookup; +import org.elasticsearch.search.lookup.Source; +import org.elasticsearch.search.lookup.SourceProvider; import java.io.IOException; import java.util.ArrayList; @@ -31,10 +32,10 @@ public Builder( String fieldName, ValuesSourceType valuesSourceType, ValueFetcher 
valueFetcher, - SourceLookup sourceLookup, + SourceProvider sourceProvider, ToScriptFieldFactory toScriptFieldFactory ) { - super(fieldName, valuesSourceType, valueFetcher, sourceLookup, toScriptFieldFactory); + super(fieldName, valuesSourceType, valueFetcher, sourceProvider, toScriptFieldFactory); } @Override @@ -43,7 +44,7 @@ public SourceValueFetcherSortedNumericIndexFieldData build(IndexFieldDataCache c fieldName, valuesSourceType, valueFetcher, - sourceLookup, + sourceProvider, toScriptFieldFactory ); } @@ -53,15 +54,15 @@ protected SourceValueFetcherSortedNumericIndexFieldData( String fieldName, ValuesSourceType valuesSourceType, ValueFetcher valueFetcher, - SourceLookup sourceLookup, + SourceProvider sourceProvider, ToScriptFieldFactory toScriptFieldFactory ) { - super(fieldName, valuesSourceType, valueFetcher, sourceLookup, toScriptFieldFactory); + super(fieldName, valuesSourceType, valueFetcher, sourceProvider, toScriptFieldFactory); } @Override public SourceValueFetcherSortedNumericLeafFieldData loadDirect(LeafReaderContext context) throws Exception { - return new SourceValueFetcherSortedNumericLeafFieldData(toScriptFieldFactory, context, valueFetcher, sourceLookup); + return new SourceValueFetcherSortedNumericLeafFieldData(toScriptFieldFactory, context, valueFetcher, sourceProvider); } public static class SourceValueFetcherSortedNumericLeafFieldData extends SourceValueFetcherLeafFieldData { @@ -70,15 +71,15 @@ public SourceValueFetcherSortedNumericLeafFieldData( ToScriptFieldFactory toScriptFieldFactory, LeafReaderContext leafReaderContext, ValueFetcher valueFetcher, - SourceLookup sourceLookup + SourceProvider sourceProvider ) { - super(toScriptFieldFactory, leafReaderContext, valueFetcher, sourceLookup); + super(toScriptFieldFactory, leafReaderContext, valueFetcher, sourceProvider); } @Override public DocValuesScriptFieldFactory getScriptFieldFactory(String name) { return toScriptFieldFactory.getScriptFieldFactory( - new SourceValueFetcherSortedNumericDocValues(leafReaderContext, valueFetcher, sourceLookup), + new SourceValueFetcherSortedNumericDocValues(leafReaderContext, valueFetcher, sourceProvider), name ); } @@ -89,7 +90,7 @@ public static class SourceValueFetcherSortedNumericDocValues extends SortedNumer protected final LeafReaderContext leafReaderContext; protected final ValueFetcher valueFetcher; - protected final SourceLookup sourceLookup; + protected final SourceProvider sourceProvider; protected final List values; protected Iterator iterator; @@ -97,21 +98,20 @@ public static class SourceValueFetcherSortedNumericDocValues extends SortedNumer public SourceValueFetcherSortedNumericDocValues( LeafReaderContext leafReaderContext, ValueFetcher valueFetcher, - SourceLookup sourceLookup + SourceProvider sourceProvider ) { this.leafReaderContext = leafReaderContext; this.valueFetcher = valueFetcher; - this.sourceLookup = sourceLookup; + this.sourceProvider = sourceProvider; values = new ArrayList<>(); } @Override public boolean advanceExact(int doc) throws IOException { - sourceLookup.setSegmentAndDocument(leafReaderContext, doc); values.clear(); - - for (Object value : valueFetcher.fetchValues(sourceLookup, doc, Collections.emptyList())) { + Source source = sourceProvider.getSource(leafReaderContext, doc); + for (Object value : valueFetcher.fetchValues(source, doc, Collections.emptyList())) { assert value instanceof Number; values.add(((Number) value).longValue()); } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/StringScriptDocValues.java 
b/server/src/main/java/org/elasticsearch/index/fielddata/StringScriptDocValues.java index e42659743521..f5e236e9e864 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/StringScriptDocValues.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/StringScriptDocValues.java @@ -21,7 +21,8 @@ public final class StringScriptDocValues extends SortingBinaryDocValues { @Override public boolean advanceExact(int docId) { - List results = script.resultsForDoc(docId); + script.runForDoc(docId); + List results = script.getValues(); count = results.size(); if (count == 0) { return false; diff --git a/server/src/main/java/org/elasticsearch/index/fieldvisitor/StoredFieldLoader.java b/server/src/main/java/org/elasticsearch/index/fieldvisitor/StoredFieldLoader.java index 7e2283fec76d..bbb5f40431e6 100644 --- a/server/src/main/java/org/elasticsearch/index/fieldvisitor/StoredFieldLoader.java +++ b/server/src/main/java/org/elasticsearch/index/fieldvisitor/StoredFieldLoader.java @@ -60,6 +60,23 @@ public List fieldsToLoad() { }; } + /** + * Creates a StoredFieldLoader tuned for sequential reads of _source + */ + public static StoredFieldLoader sequentialSource() { + return new StoredFieldLoader() { + @Override + public LeafStoredFieldLoader getLoader(LeafReaderContext ctx, int[] docs) { + return new ReaderStoredFieldLoader(sequentialReader(ctx), true, Set.of()); + } + + @Override + public List fieldsToLoad() { + return List.of(); + } + }; + } + /** * Creates a no-op StoredFieldLoader that will not load any fields from disk */ @@ -82,8 +99,16 @@ private static CheckedBiConsumer reader(Lea if (docs == null) { return leafReader::document; } - if (leafReader instanceof SequentialStoredFieldsLeafReader lf && docs.length > 10 && hasSequentialDocs(docs)) { - return lf.getSequentialStoredFieldsReader()::visitDocument; + if (docs.length > 10 && hasSequentialDocs(docs)) { + return sequentialReader(ctx); + } + return leafReader::document; + } + + private static CheckedBiConsumer sequentialReader(LeafReaderContext ctx) { + LeafReader leafReader = ctx.reader(); + if (leafReader instanceof SequentialStoredFieldsLeafReader lf) { + return lf.getSequentialStoredFieldsReader()::document; } return leafReader::document; } diff --git a/server/src/main/java/org/elasticsearch/index/get/GetResult.java b/server/src/main/java/org/elasticsearch/index/get/GetResult.java index 2d8b6b0572bd..efedb141d246 100644 --- a/server/src/main/java/org/elasticsearch/index/get/GetResult.java +++ b/server/src/main/java/org/elasticsearch/index/get/GetResult.java @@ -9,7 +9,7 @@ package org.elasticsearch.index.get; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressorFactory; @@ -24,7 +24,7 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.rest.action.document.RestMultiGetAction; -import org.elasticsearch.search.lookup.SourceLookup; +import org.elasticsearch.search.lookup.Source; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -66,7 +66,7 @@ public class GetResult implements Writeable, Iterable, ToXContent public GetResult(StreamInput in) throws IOException { index = in.readString(); - if 
(in.getVersion().before(Version.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { in.readOptionalString(); } id = in.readString(); @@ -225,7 +225,7 @@ public Map sourceAsMap() throws ElasticsearchParseException { return sourceAsMap; } - sourceAsMap = SourceLookup.sourceAsMap(source); + sourceAsMap = Source.fromBytes(source).source(); return sourceAsMap; } @@ -386,7 +386,7 @@ public static GetResult fromXContent(XContentParser parser) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - if (out.getVersion().before(Version.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { out.writeOptionalString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java index a6db9824c756..c481efeb3a23 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java @@ -163,7 +163,7 @@ protected AbstractGeometryFieldMapper( MultiFields multiFields, CopyTo copyTo, Parser parser, - String onScriptError + OnScriptError onScriptError ) { super(simpleName, mappedFieldType, multiFields, copyTo, true, onScriptError); this.ignoreMalformed = Explicit.EXPLICIT_FALSE; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java index 571f0d59a9f2..14db646d8684 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java @@ -54,7 +54,7 @@ protected AbstractPointGeometryFieldMapper( MultiFields multiFields, CopyTo copyTo, Parser parser, - String onScriptError + OnScriptError onScriptError ) { super(simpleName, mappedFieldType, multiFields, copyTo, parser, onScriptError); this.nullValue = null; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java index aa0e0c17a52b..b84294168397 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractScriptFieldType.java @@ -215,7 +215,7 @@ public void validateMatchedRoutingPath() { abstract static class Builder extends RuntimeField.Builder { private final ScriptContext scriptContext; - final FieldMapper.Parameter